prompt: string, lengths 162 to 4.26M
response: string, lengths 109 to 5.16M
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
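// The opcode and size shadow state is maintained the same way as `inflight`:
// fields recorded for accepted 'A' requests are cleared when the matching
// (non-ReleaseAck) 'D' response fires, keyed by the one-hot source ID.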
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers 
[min, max] (in bytes)
case class TransferSizes(min: Int, max: Int) {
  def this(x: Int) = this(x, x)

  require (min <= max, s"Min transfer $min > max transfer $max")
  require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
  require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
  require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
  require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")

  def none = min == 0
  def contains(x: Int) = isPow2(x) && min <= x && x <= max
  def containsLg(x: Int) = contains(1 << x)
  def containsLg(x: UInt) =
    if (none) false.B
    else if (min == max) { log2Ceil(min).U === x }
    else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }

  def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)

  def intersect(x: TransferSizes) =
    if (x.max < min || max < x.min) TransferSizes.none
    else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))

  // Not a union, because the result may contain sizes contained by neither term
  // NOT TO BE CONFUSED WITH COVERPOINTS
  def mincover(x: TransferSizes) = {
    if (none) {
      x
    } else if (x.none) {
      this
    } else {
      TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
    }
  }

  override def toString() = "TransferSizes[%d, %d]".format(min, max)
}

object TransferSizes {
  def apply(x: Int) = new TransferSizes(x)
  val none = new TransferSizes(0)

  def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
  def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)

  implicit def asBool(x: TransferSizes) = !x.none
}

// AddressSets specify the address space managed by the manager
// Base is the base address, and mask gives the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
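// A quick worked check of the mask arithmetic described above (illustrative comment only,
// not taken from the original file). With base=0x1000, mask=0xf0f, contains(x) tests
// ((x ^ base) & ~mask) == 0, i.e. every bit outside the mask must match the base:
//   x = 0x1104: x ^ base = 0x0104; bits 2 and 8 both lie inside the mask  -> contained
//   x = 0x1014: x ^ base = 0x0014; bit 4 lies outside the mask            -> not contained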
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
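Before the generated output, a minimal illustrative sketch (not one of the files above) of how a tap-style module can consume the TLEdge beat-tracking helpers defined in Edges.scala. The module name BeatTracker and its single port are hypothetical and exist only for this example; the port style mirrors TLMonitorBase's Input(new TLBundle(...)).

import chisel3._
import freechips.rocketchip.tilelink._

// Hypothetical example: watch an existing link's A channel and use edge.firstlast
// to follow multibeat messages.
class BeatTracker(edge: TLEdge) extends Module {
  val io = IO(new Bundle {
    val in = Input(new TLBundle(edge.bundle))
  })
  // a_first is true on the first beat of an A message, a_done when its last beat fires
  val (a_first, a_last, a_done) = edge.firstlast(io.in.a.bits, io.in.a.fire)
  val busy = RegInit(false.B)
  when (io.in.a.fire && a_first) { busy := true.B }
  when (io.in.a.fire && a_done)  { busy := false.B }
}

The same first/last/done machinery is what allows a monitor such as the one below to check that fields like opcode, size, and source stay constant across the beats of a single multibeat message.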
module TLMonitor_15( // @[Monitor.scala:36:7]
  input         clock, // @[Monitor.scala:36:7]
  input         reset, // @[Monitor.scala:36:7]
  input         io_in_a_ready, // @[Monitor.scala:20:14]
  input         io_in_a_valid, // @[Monitor.scala:20:14]
  input  [2:0]  io_in_a_bits_opcode, // @[Monitor.scala:20:14]
  input  [2:0]  io_in_a_bits_param, // @[Monitor.scala:20:14]
  input  [3:0]  io_in_a_bits_size, // @[Monitor.scala:20:14]
  input  [3:0]  io_in_a_bits_source, // @[Monitor.scala:20:14]
  input  [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
  input  [7:0]  io_in_a_bits_mask, // @[Monitor.scala:20:14]
  input  [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
  input         io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
  input         io_in_d_ready, // @[Monitor.scala:20:14]
  input         io_in_d_valid, // @[Monitor.scala:20:14]
  input  [2:0]  io_in_d_bits_opcode, // @[Monitor.scala:20:14]
  input  [1:0]  io_in_d_bits_param, // @[Monitor.scala:20:14]
  input  [3:0]  io_in_d_bits_size, // @[Monitor.scala:20:14]
  input  [3:0]  io_in_d_bits_source, // @[Monitor.scala:20:14]
  input  [4:0]  io_in_d_bits_sink, // @[Monitor.scala:20:14]
  input         io_in_d_bits_denied, // @[Monitor.scala:20:14]
  input  [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
  input         io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);

  wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
  wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
  wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
  wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
  wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
  wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
  wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
  wire [3:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
  wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
  wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
  wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
  wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
  wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
  wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
  wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
  wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
  wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
  wire [3:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
  wire [4:0] io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
  wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
  wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
  wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
  wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
  wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
  wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
  wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
  wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
  wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
  wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
  wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
  wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
  wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
  wire _c_first_WIRE_3_valid = 1'h0; //
@[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] 
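  // Commentary (added; not emitted by the tool): this monitor's io_in bundle carries only the A and
  // D channels, so the surrounding `_c_*` (C-channel) and probe-ack bookkeeping wires are all tied
  // to constant zero; the TL-C tracking logic of Monitor.scala constant-folds away on this edge.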
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [8:0] c_first_beats1_decode = 9'h0; // @[Edges.scala:220:59] wire [8:0] c_first_beats1 = 9'h0; // @[Edges.scala:221:14] wire [8:0] _c_first_count_T = 9'h0; // @[Edges.scala:234:27] wire [8:0] c_first_count = 9'h0; // @[Edges.scala:234:25] wire [8:0] _c_first_counter_T = 9'h0; // @[Edges.scala:236:21] wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_4 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_10 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_14 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_16 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_20 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_22 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_28 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_30 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_34 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_36 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_40 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_42 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_46 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_48 = 1'h1; // @[Parameters.scala:57:20] wire sink_ok = 1'h1; // @[Monitor.scala:309:31] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [8:0] c_first_counter1 = 9'h1FF; // @[Edges.scala:230:28] wire [9:0] _c_first_counter1_T = 10'h3FF; // @[Edges.scala:230:28] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] 
_c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] c_opcodes_set = 64'h0; // @[Monitor.scala:740:34] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_wo_ready_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_wo_ready_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_2_bits_address = 32'h0; // 
@[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_2_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_3_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_wo_ready_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_set_wo_ready_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_set_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_interm_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_interm_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_sizes_set_interm_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_sizes_set_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] 
_c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_2_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_3_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_2_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_3_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_4_bits_source = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_5_bits_source = 4'h0; // @[Bundles.scala:265:61] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] 
_c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57] wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57] wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] 
_c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [131:0] _c_sizes_set_T_1 = 132'h0; // @[Monitor.scala:768:52] wire [6:0] _c_opcodes_set_T = 7'h0; // @[Monitor.scala:767:79] wire [6:0] _c_sizes_set_T = 7'h0; // @[Monitor.scala:768:77] wire [130:0] _c_opcodes_set_T_1 = 131'h0; // @[Monitor.scala:767:54] wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59] wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40] wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [15:0] _c_set_wo_ready_T = 16'h1; // @[OneHot.scala:58:35] wire [15:0] _c_set_T = 16'h1; // @[OneHot.scala:58:35] wire [127:0] c_sizes_set = 128'h0; // @[Monitor.scala:741:34] wire [15:0] c_set = 16'h0; // @[Monitor.scala:738:34] wire [15:0] c_set_wo_ready = 16'h0; // @[Monitor.scala:739:34] wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46] wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76] wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117] wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48] wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119] wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [3:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] 
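  // Commentary (added; not emitted by the tool): the `source_ok` logic below decodes the 4-bit A/D
  // source ID by comparing bits [3:2] against the four client ID groups (2'h0 through 2'h3) and
  // slicing bits [1:0] off as `uncommonBits`; a source is legal when any group comparison matches.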
wire [3:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] _source_ok_T = io_in_a_bits_source_0[3:2]; // @[Monitor.scala:36:7] wire [1:0] _source_ok_T_6 = io_in_a_bits_source_0[3:2]; // @[Monitor.scala:36:7] wire [1:0] _source_ok_T_12 = io_in_a_bits_source_0[3:2]; // @[Monitor.scala:36:7] wire [1:0] _source_ok_T_18 = io_in_a_bits_source_0[3:2]; // @[Monitor.scala:36:7] wire _source_ok_T_1 = _source_ok_T == 2'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_3 = _source_ok_T_1; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_5 = _source_ok_T_3; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_7 = _source_ok_T_6 == 2'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_9 = _source_ok_T_7; // 
@[Parameters.scala:54:{32,67}] wire _source_ok_T_11 = _source_ok_T_9; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_11; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_13 = _source_ok_T_12 == 2'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_15 = _source_ok_T_13; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_17 = _source_ok_T_15; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_17; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_19 = &_source_ok_T_18; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_21 = _source_ok_T_19; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_23 = _source_ok_T_21; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_23; // @[Parameters.scala:1138:31] wire _source_ok_T_24 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_25 = _source_ok_T_24 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_25 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71] wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [31:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 4'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, 
:211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // 
@[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] _source_ok_T_26 = io_in_d_bits_source_0[3:2]; // @[Monitor.scala:36:7] wire [1:0] _source_ok_T_32 = io_in_d_bits_source_0[3:2]; // 
@[Monitor.scala:36:7] wire [1:0] _source_ok_T_38 = io_in_d_bits_source_0[3:2]; // @[Monitor.scala:36:7] wire [1:0] _source_ok_T_44 = io_in_d_bits_source_0[3:2]; // @[Monitor.scala:36:7] wire _source_ok_T_27 = _source_ok_T_26 == 2'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_29 = _source_ok_T_27; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_31 = _source_ok_T_29; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_0 = _source_ok_T_31; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_33 = _source_ok_T_32 == 2'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_35 = _source_ok_T_33; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_37 = _source_ok_T_35; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_37; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_39 = _source_ok_T_38 == 2'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_41 = _source_ok_T_39; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_43 = _source_ok_T_41; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_43; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_45 = &_source_ok_T_44; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_47 = _source_ok_T_45; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_49 = _source_ok_T_47; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_49; // @[Parameters.scala:1138:31] wire _source_ok_T_50 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_51 = _source_ok_T_50 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_51 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _T_1477 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1477; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1477; // @[Decoupled.scala:51:35] wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [8:0] a_first_beats1 = a_first_beats1_opdata ? 
a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [8:0] a_first_counter; // @[Edges.scala:229:27] wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [3:0] size; // @[Monitor.scala:389:22] reg [3:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] wire _T_1550 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1550; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1550; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1550; // @[Decoupled.scala:51:35] wire [26:0] _GEN_0 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [8:0] d_first_beats1 = d_first_beats1_opdata ? 
d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg [3:0] source_1; // @[Monitor.scala:541:22] reg [4:0] sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [15:0] inflight; // @[Monitor.scala:614:27] reg [63:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [127:0] inflight_sizes; // @[Monitor.scala:618:33] wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [8:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [8:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [8:0] a_first_counter_1; // @[Edges.scala:229:27] wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [8:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46] wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter_1; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [15:0] a_set; // @[Monitor.scala:626:34] wire [15:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [63:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [127:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [6:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [6:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [6:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [6:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [6:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [63:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [63:0] _a_opcode_lookup_T_6 = {60'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [63:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[63:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [7:0] a_size_lookup; // @[Monitor.scala:639:33] wire [6:0] _GEN_2 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65] wire [6:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65] wire [6:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_2; // @[Monitor.scala:641:65, :681:99] wire [6:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65, :750:67] wire [6:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_2; // @[Monitor.scala:641:65, :791:99] wire [127:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [127:0] _a_size_lookup_T_6 = {120'h0, _a_size_lookup_T_1[7:0]}; // @[Monitor.scala:641:{40,91}] wire [127:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[127:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = 
io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [15:0] _GEN_3 = {12'h0, io_in_a_bits_source_0}; // @[OneHot.scala:58:35] wire [15:0] _GEN_4 = 16'h1 << _GEN_3; // @[OneHot.scala:58:35] wire [15:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_4; // @[OneHot.scala:58:35] wire [15:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_4; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T : 16'h0; // @[OneHot.scala:58:35] wire _T_1403 = _T_1477 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1403 ? _a_set_T : 16'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1403 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1403 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [6:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [130:0] _a_opcodes_set_T_1 = {127'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1403 ? _a_opcodes_set_T_1[63:0] : 64'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [6:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77] wire [131:0] _a_sizes_set_T_1 = {127'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_1403 ? _a_sizes_set_T_1[127:0] : 128'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [15:0] d_clr; // @[Monitor.scala:664:34] wire [15:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [63:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [127:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_5 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_5; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_5; // @[Monitor.scala:673:46, :783:46] wire _T_1449 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [15:0] _GEN_6 = {12'h0, io_in_d_bits_source_0}; // @[OneHot.scala:58:35] wire [15:0] _GEN_7 = 16'h1 << _GEN_6; // @[OneHot.scala:58:35] wire [15:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_7; // @[OneHot.scala:58:35] wire [15:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_7; // @[OneHot.scala:58:35] wire [15:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_7; // @[OneHot.scala:58:35] wire [15:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_7; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1449 & ~d_release_ack ? _d_clr_wo_ready_T : 16'h0; // @[OneHot.scala:58:35] wire _T_1418 = _T_1550 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_1418 ? 
_d_clr_T : 16'h0; // @[OneHot.scala:58:35] wire [142:0] _d_opcodes_clr_T_5 = 143'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1418 ? _d_opcodes_clr_T_5[63:0] : 64'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [142:0] _d_sizes_clr_T_5 = 143'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1418 ? _d_sizes_clr_T_5[127:0] : 128'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [15:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [15:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [15:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [63:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [63:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [63:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [127:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [127:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [127:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [15:0] inflight_1; // @[Monitor.scala:726:35] wire [15:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [63:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [63:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [127:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [127:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46] wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? 
d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter_2; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [7:0] c_size_lookup; // @[Monitor.scala:748:35] wire [63:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [63:0] _c_opcode_lookup_T_6 = {60'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [63:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[63:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [127:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [127:0] _c_size_lookup_T_6 = {120'h0, _c_size_lookup_T_1[7:0]}; // @[Monitor.scala:750:{42,93}] wire [127:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[127:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [15:0] d_clr_1; // @[Monitor.scala:774:34] wire [15:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [63:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [127:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1521 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1521 & d_release_ack_1 ? _d_clr_wo_ready_T_1 : 16'h0; // @[OneHot.scala:58:35] wire _T_1503 = _T_1550 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1503 ? _d_clr_T_1 : 16'h0; // @[OneHot.scala:58:35] wire [142:0] _d_opcodes_clr_T_11 = 143'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1503 ? _d_opcodes_clr_T_11[63:0] : 64'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [142:0] _d_sizes_clr_T_11 = 143'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1503 ? 
_d_sizes_clr_T_11[127:0] : 128'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 4'h0; // @[Monitor.scala:36:7, :795:113] wire [15:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [15:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [63:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [63:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [127:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [127:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
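The Verilog above implements the monitor's inflight-transaction bookkeeping: on the first accepted A-channel beat a one-hot bit indexed by the source ID is OR-ed into an inflight vector (the 16'h1 << source terms from OneHot.scala), and on the first accepted D-channel beat the corresponding bit is cleared, with parallel vectors tracking the opcode and size per source. A minimal Chisel sketch of that pattern is given below; it is illustrative only (module and port names are invented, and the real TLMonitor also tracks opcodes, sizes, and a watchdog), assuming 16 source IDs to match the 16-bit vectors in the generated code.

import chisel3._
import chisel3.util._

// Hedged sketch: one-hot inflight tracking in the style of the monitor above.
class InflightTracker extends Module {
  val io = IO(new Bundle {
    val a_fire    = Input(Bool())      // first A-channel beat accepted
    val a_source  = Input(UInt(4.W))
    val d_fire    = Input(Bool())      // first D-channel beat accepted
    val d_source  = Input(UInt(4.W))
    val inflight  = Output(UInt(16.W))
  })
  val inflight = RegInit(0.U(16.W))
  // UIntToOH elaborates to the "16'h1 << source" seen in the Verilog.
  val a_set = Mux(io.a_fire, UIntToOH(io.a_source, 16), 0.U(16.W))
  val d_clr = Mux(io.d_fire, UIntToOH(io.d_source, 16), 0.U(16.W))
  inflight := (inflight | a_set) & ~d_clr
  io.inflight := inflight
}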
Generate the Verilog code corresponding to the following Chisel files. File INToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import consts._ class INToRecFN(intWidth: Int, expWidth: Int, sigWidth: Int) extends RawModule { override def desiredName = s"INToRecFN_i${intWidth}_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val signedIn = Input(Bool()) val in = Input(Bits(intWidth.W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val intAsRawFloat = rawFloatFromIN(io.signedIn, io.in); val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( intAsRawFloat.expWidth, intWidth, expWidth, sigWidth, flRoundOpt_sigMSBitAlwaysZero | flRoundOpt_neverUnderflows )) roundAnyRawFNToRecFN.io.invalidExc := false.B roundAnyRawFNToRecFN.io.infiniteExc := false.B roundAnyRawFNToRecFN.io.in := intAsRawFloat roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags } File rawFloatFromIN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). 
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ object rawFloatFromIN { def apply(signedIn: Bool, in: Bits): RawFloat = { val expWidth = log2Up(in.getWidth) + 1 //*** CHANGE THIS; CAN BE VERY LARGE: val extIntWidth = 1<<(expWidth - 1) val sign = signedIn && in(in.getWidth - 1) val absIn = Mux(sign, -in.asUInt, in.asUInt) val extAbsIn = (0.U(extIntWidth.W) ## absIn)(extIntWidth - 1, 0) val adjustedNormDist = countLeadingZeros(extAbsIn) val sig = (extAbsIn<<adjustedNormDist)( extIntWidth - 1, extIntWidth - in.getWidth) val out = Wire(new RawFloat(expWidth, in.getWidth)) out.isNaN := false.B out.isInf := false.B out.isZero := ! sig(in.getWidth - 1) out.sign := sign out.sExp := (2.U(2.W) ## ~adjustedNormDist(expWidth - 2, 0)).zext out.sig := sig out } }
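The generated module that follows is the i32/e8/s24 specialization with signedIn and detectTininess tied high and roundingMode tied to 3'h0. A hedged usage sketch of that same configuration is shown below; the wrapper name and ports are illustrative, and it assumes the hardfloat sources above are on the compile path. The constant 0 for roundingMode simply mirrors the tied-off value visible in the generated Verilog.

import chisel3._
import hardfloat.INToRecFN

// Hedged sketch: driving INToRecFN as specialized in the Verilog below.
class INToRecFNWrapper extends Module {
  val io = IO(new Bundle {
    val in  = Input(SInt(32.W))
    val out = Output(UInt(33.W)) // recoded float is expWidth + sigWidth + 1 bits
  })
  val conv = Module(new INToRecFN(intWidth = 32, expWidth = 8, sigWidth = 24))
  conv.io.signedIn       := true.B
  conv.io.in             := io.in.asUInt
  conv.io.roundingMode   := 0.U  // matches the 3'h0 tie-off in the generated module
  conv.io.detectTininess := 1.U
  io.out := conv.io.out
}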
module INToRecFN_i32_e8_s24_15( // @[INToRecFN.scala:43:7] input [31:0] io_in, // @[INToRecFN.scala:46:16] output [32:0] io_out // @[INToRecFN.scala:46:16] ); wire [31:0] io_in_0 = io_in; // @[INToRecFN.scala:43:7] wire intAsRawFloat_isNaN = 1'h0; // @[rawFloatFromIN.scala:59:23] wire intAsRawFloat_isInf = 1'h0; // @[rawFloatFromIN.scala:59:23] wire [2:0] io_roundingMode = 3'h0; // @[INToRecFN.scala:43:7, :46:16, :60:15] wire io_signedIn = 1'h1; // @[INToRecFN.scala:43:7] wire io_detectTininess = 1'h1; // @[INToRecFN.scala:43:7] wire [32:0] io_out_0; // @[INToRecFN.scala:43:7] wire [4:0] io_exceptionFlags; // @[INToRecFN.scala:43:7] wire _intAsRawFloat_sign_T = io_in_0[31]; // @[rawFloatFromIN.scala:51:34] wire intAsRawFloat_sign = _intAsRawFloat_sign_T; // @[rawFloatFromIN.scala:51:{29,34}] wire intAsRawFloat_sign_0 = intAsRawFloat_sign; // @[rawFloatFromIN.scala:51:29, :59:23] wire [32:0] _intAsRawFloat_absIn_T = 33'h0 - {1'h0, io_in_0}; // @[rawFloatFromIN.scala:52:31] wire [31:0] _intAsRawFloat_absIn_T_1 = _intAsRawFloat_absIn_T[31:0]; // @[rawFloatFromIN.scala:52:31] wire [31:0] intAsRawFloat_absIn = intAsRawFloat_sign ? _intAsRawFloat_absIn_T_1 : io_in_0; // @[rawFloatFromIN.scala:51:29, :52:{24,31}] wire [63:0] _intAsRawFloat_extAbsIn_T = {32'h0, intAsRawFloat_absIn}; // @[rawFloatFromIN.scala:52:24, :53:44] wire [31:0] intAsRawFloat_extAbsIn = _intAsRawFloat_extAbsIn_T[31:0]; // @[rawFloatFromIN.scala:53:{44,53}] wire _intAsRawFloat_adjustedNormDist_T = intAsRawFloat_extAbsIn[0]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_1 = intAsRawFloat_extAbsIn[1]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_2 = intAsRawFloat_extAbsIn[2]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_3 = intAsRawFloat_extAbsIn[3]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_4 = intAsRawFloat_extAbsIn[4]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_5 = intAsRawFloat_extAbsIn[5]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_6 = intAsRawFloat_extAbsIn[6]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_7 = intAsRawFloat_extAbsIn[7]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_8 = intAsRawFloat_extAbsIn[8]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_9 = intAsRawFloat_extAbsIn[9]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_10 = intAsRawFloat_extAbsIn[10]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_11 = intAsRawFloat_extAbsIn[11]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_12 = intAsRawFloat_extAbsIn[12]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_13 = intAsRawFloat_extAbsIn[13]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_14 = intAsRawFloat_extAbsIn[14]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_15 = intAsRawFloat_extAbsIn[15]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_16 = intAsRawFloat_extAbsIn[16]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_17 = intAsRawFloat_extAbsIn[17]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_18 = intAsRawFloat_extAbsIn[18]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_19 = intAsRawFloat_extAbsIn[19]; // @[rawFloatFromIN.scala:53:53] wire 
_intAsRawFloat_adjustedNormDist_T_20 = intAsRawFloat_extAbsIn[20]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_21 = intAsRawFloat_extAbsIn[21]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_22 = intAsRawFloat_extAbsIn[22]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_23 = intAsRawFloat_extAbsIn[23]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_24 = intAsRawFloat_extAbsIn[24]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_25 = intAsRawFloat_extAbsIn[25]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_26 = intAsRawFloat_extAbsIn[26]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_27 = intAsRawFloat_extAbsIn[27]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_28 = intAsRawFloat_extAbsIn[28]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_29 = intAsRawFloat_extAbsIn[29]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_30 = intAsRawFloat_extAbsIn[30]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_31 = intAsRawFloat_extAbsIn[31]; // @[rawFloatFromIN.scala:53:53] wire [4:0] _intAsRawFloat_adjustedNormDist_T_32 = {4'hF, ~_intAsRawFloat_adjustedNormDist_T_1}; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_33 = _intAsRawFloat_adjustedNormDist_T_2 ? 5'h1D : _intAsRawFloat_adjustedNormDist_T_32; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_34 = _intAsRawFloat_adjustedNormDist_T_3 ? 5'h1C : _intAsRawFloat_adjustedNormDist_T_33; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_35 = _intAsRawFloat_adjustedNormDist_T_4 ? 5'h1B : _intAsRawFloat_adjustedNormDist_T_34; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_36 = _intAsRawFloat_adjustedNormDist_T_5 ? 5'h1A : _intAsRawFloat_adjustedNormDist_T_35; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_37 = _intAsRawFloat_adjustedNormDist_T_6 ? 5'h19 : _intAsRawFloat_adjustedNormDist_T_36; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_38 = _intAsRawFloat_adjustedNormDist_T_7 ? 5'h18 : _intAsRawFloat_adjustedNormDist_T_37; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_39 = _intAsRawFloat_adjustedNormDist_T_8 ? 5'h17 : _intAsRawFloat_adjustedNormDist_T_38; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_40 = _intAsRawFloat_adjustedNormDist_T_9 ? 5'h16 : _intAsRawFloat_adjustedNormDist_T_39; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_41 = _intAsRawFloat_adjustedNormDist_T_10 ? 5'h15 : _intAsRawFloat_adjustedNormDist_T_40; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_42 = _intAsRawFloat_adjustedNormDist_T_11 ? 5'h14 : _intAsRawFloat_adjustedNormDist_T_41; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_43 = _intAsRawFloat_adjustedNormDist_T_12 ? 5'h13 : _intAsRawFloat_adjustedNormDist_T_42; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_44 = _intAsRawFloat_adjustedNormDist_T_13 ? 5'h12 : _intAsRawFloat_adjustedNormDist_T_43; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_45 = _intAsRawFloat_adjustedNormDist_T_14 ? 5'h11 : _intAsRawFloat_adjustedNormDist_T_44; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_46 = _intAsRawFloat_adjustedNormDist_T_15 ? 
5'h10 : _intAsRawFloat_adjustedNormDist_T_45; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_47 = _intAsRawFloat_adjustedNormDist_T_16 ? 5'hF : _intAsRawFloat_adjustedNormDist_T_46; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_48 = _intAsRawFloat_adjustedNormDist_T_17 ? 5'hE : _intAsRawFloat_adjustedNormDist_T_47; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_49 = _intAsRawFloat_adjustedNormDist_T_18 ? 5'hD : _intAsRawFloat_adjustedNormDist_T_48; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_50 = _intAsRawFloat_adjustedNormDist_T_19 ? 5'hC : _intAsRawFloat_adjustedNormDist_T_49; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_51 = _intAsRawFloat_adjustedNormDist_T_20 ? 5'hB : _intAsRawFloat_adjustedNormDist_T_50; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_52 = _intAsRawFloat_adjustedNormDist_T_21 ? 5'hA : _intAsRawFloat_adjustedNormDist_T_51; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_53 = _intAsRawFloat_adjustedNormDist_T_22 ? 5'h9 : _intAsRawFloat_adjustedNormDist_T_52; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_54 = _intAsRawFloat_adjustedNormDist_T_23 ? 5'h8 : _intAsRawFloat_adjustedNormDist_T_53; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_55 = _intAsRawFloat_adjustedNormDist_T_24 ? 5'h7 : _intAsRawFloat_adjustedNormDist_T_54; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_56 = _intAsRawFloat_adjustedNormDist_T_25 ? 5'h6 : _intAsRawFloat_adjustedNormDist_T_55; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_57 = _intAsRawFloat_adjustedNormDist_T_26 ? 5'h5 : _intAsRawFloat_adjustedNormDist_T_56; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_58 = _intAsRawFloat_adjustedNormDist_T_27 ? 5'h4 : _intAsRawFloat_adjustedNormDist_T_57; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_59 = _intAsRawFloat_adjustedNormDist_T_28 ? 5'h3 : _intAsRawFloat_adjustedNormDist_T_58; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_60 = _intAsRawFloat_adjustedNormDist_T_29 ? 5'h2 : _intAsRawFloat_adjustedNormDist_T_59; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_adjustedNormDist_T_61 = _intAsRawFloat_adjustedNormDist_T_30 ? 5'h1 : _intAsRawFloat_adjustedNormDist_T_60; // @[Mux.scala:50:70] wire [4:0] intAsRawFloat_adjustedNormDist = _intAsRawFloat_adjustedNormDist_T_31 ? 
5'h0 : _intAsRawFloat_adjustedNormDist_T_61; // @[Mux.scala:50:70] wire [4:0] _intAsRawFloat_out_sExp_T = intAsRawFloat_adjustedNormDist; // @[Mux.scala:50:70] wire [62:0] _intAsRawFloat_sig_T = {31'h0, intAsRawFloat_extAbsIn} << intAsRawFloat_adjustedNormDist; // @[Mux.scala:50:70] wire [31:0] intAsRawFloat_sig = _intAsRawFloat_sig_T[31:0]; // @[rawFloatFromIN.scala:56:{22,41}] wire _intAsRawFloat_out_isZero_T_1; // @[rawFloatFromIN.scala:62:23] wire [7:0] _intAsRawFloat_out_sExp_T_3; // @[rawFloatFromIN.scala:64:72] wire intAsRawFloat_isZero; // @[rawFloatFromIN.scala:59:23] wire [7:0] intAsRawFloat_sExp; // @[rawFloatFromIN.scala:59:23] wire [32:0] intAsRawFloat_sig_0; // @[rawFloatFromIN.scala:59:23] wire _intAsRawFloat_out_isZero_T = intAsRawFloat_sig[31]; // @[rawFloatFromIN.scala:56:41, :62:28] assign _intAsRawFloat_out_isZero_T_1 = ~_intAsRawFloat_out_isZero_T; // @[rawFloatFromIN.scala:62:{23,28}] assign intAsRawFloat_isZero = _intAsRawFloat_out_isZero_T_1; // @[rawFloatFromIN.scala:59:23, :62:23] wire [4:0] _intAsRawFloat_out_sExp_T_1 = ~_intAsRawFloat_out_sExp_T; // @[rawFloatFromIN.scala:64:{36,53}] wire [6:0] _intAsRawFloat_out_sExp_T_2 = {2'h2, _intAsRawFloat_out_sExp_T_1}; // @[rawFloatFromIN.scala:64:{33,36}] assign _intAsRawFloat_out_sExp_T_3 = {1'h0, _intAsRawFloat_out_sExp_T_2}; // @[rawFloatFromIN.scala:64:{33,72}] assign intAsRawFloat_sExp = _intAsRawFloat_out_sExp_T_3; // @[rawFloatFromIN.scala:59:23, :64:72] assign intAsRawFloat_sig_0 = {1'h0, intAsRawFloat_sig}; // @[rawFloatFromIN.scala:56:41, :59:23, :65:20] RoundAnyRawFNToRecFN_ie6_is32_oe8_os24_15 roundAnyRawFNToRecFN ( // @[INToRecFN.scala:60:15] .io_in_isZero (intAsRawFloat_isZero), // @[rawFloatFromIN.scala:59:23] .io_in_sign (intAsRawFloat_sign_0), // @[rawFloatFromIN.scala:59:23] .io_in_sExp (intAsRawFloat_sExp), // @[rawFloatFromIN.scala:59:23] .io_in_sig (intAsRawFloat_sig_0), // @[rawFloatFromIN.scala:59:23] .io_out (io_out_0), .io_exceptionFlags (io_exceptionFlags) ); // @[INToRecFN.scala:60:15] assign io_out = io_out_0; // @[INToRecFN.scala:43:7] endmodule
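The long chain of 5-bit muxes in the module above is the elaborated leading-zero count (adjustedNormDist) over the 32-bit extended absolute value: the highest set bit wins and selects its distance from bit 31. Below is a hedged, illustrative Chisel equivalent of that priority encoding; it is not the HardFloat countLeadingZeros primitive itself, just a reimplementation of the same behavior for a fixed 32-bit input.

import chisel3._
import chisel3.util._

// Hedged sketch: leading-zero count that elaborates into a priority mux chain
// like the one in the Verilog above (result is 0 when bit 31 is set, 31 when
// only bit 0 is set).
object CountLeadingZeros32 {
  def apply(x: UInt): UInt = {
    require(x.getWidth == 32)
    PriorityMux((31 to 0 by -1).map(i => (x(i), (31 - i).U(5.W))))
  }
}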
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
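Before the generated module, here is a hedged usage sketch of the AsyncResetSynchronizerShiftReg companion object defined above, crossing a single asynchronous Bool into the local clock domain. The wrapper and signal names are illustrative; sync = 3 and init = 0 are chosen to produce the w1_d3_i0 flavor seen in the Verilog that follows.

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Hedged sketch: 3-deep asynchronous-reset synchronizer for one Bool.
class IrqSynchronizer extends Module {
  val io = IO(new Bundle {
    val irq_async = Input(Bool())
    val irq_sync  = Output(Bool())
  })
  io.irq_sync := AsyncResetSynchronizerShiftReg(io.irq_async, sync = 3, init = 0)
}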
module AsyncResetSynchronizerShiftReg_w1_d3_i0_114( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_190 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
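The wrapper above delegates the actual metastability chain to an AsyncResetSynchronizerPrimitiveShiftReg_d3_i0 instance. A hedged behavioral sketch of that 3-deep chain is given below; register names are illustrative, and the real primitive is built from the SynchronizerPrimitiveShiftReg class shown earlier so that backend flows can swap it for technology-specific synchronizer cells.

import chisel3._

// Hedged sketch: 3 chained flops, asynchronously reset to 0, sampling io.d.
class AsyncSyncChain3 extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  withReset(reset.asAsyncReset) {
    val sync_2 = RegNext(io.d, false.B)
    val sync_1 = RegNext(sync_2, false.B)
    val sync_0 = RegNext(sync_1, false.B)
    io.q := sync_0
  }
}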
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: package constellation.channel import chisel3._ import chisel3.util._ import freechips.rocketchip.diplomacy._ import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.util._ import constellation.noc.{HasNoCParams} class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams { val io = IO(new Bundle { val in = Input(new Channel(cParam)) }) val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B })) for (i <- 0 until cParam.srcSpeedup) { val flit = io.in.flit(i) when (flit.valid) { when (flit.bits.head) { in_flight(flit.bits.virt_channel_id) := true.B assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken") } when (flit.bits.tail) { in_flight(flit.bits.virt_channel_id) := false.B } } val possibleFlows = cParam.possibleFlows when (flit.valid && flit.bits.head) { cParam match { case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) => assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR) } case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR) } } } }
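The NoCMonitor above asserts head/tail sequencing per virtual channel: a head flit may only arrive on a channel with no packet in flight, and a tail flit releases the channel. A hedged, simplified Chisel sketch of just that check is shown below (single flit port, five virtual channels to match the generated module that follows); the module and port names are invented, and the flow-legality assertions are omitted.

import chisel3._

// Hedged sketch: per-virtual-channel head/tail sequencing check.
class FlitSequenceChecker(nVirtualChannels: Int = 5) extends Module {
  val io = IO(new Bundle {
    val valid           = Input(Bool())
    val head            = Input(Bool())
    val tail            = Input(Bool())
    val virt_channel_id = Input(UInt(3.W))
  })
  val in_flight = RegInit(VecInit(Seq.fill(nVirtualChannels)(false.B)))
  when (io.valid) {
    when (io.head) {
      assert(!in_flight(io.virt_channel_id), "Flit head/tail sequencing is broken")
      in_flight(io.virt_channel_id) := true.B
    }
    when (io.tail) {
      in_flight(io.virt_channel_id) := false.B
    }
  }
}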
module NoCMonitor_95( // @[Monitor.scala:11:7] input clock, // @[Monitor.scala:11:7] input reset, // @[Monitor.scala:11:7] input io_in_flit_0_valid, // @[Monitor.scala:12:14] input io_in_flit_0_bits_head, // @[Monitor.scala:12:14] input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14] input [4:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14] input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14] input [4:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14] input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14] input [2:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14] ); reg in_flight_0; // @[Monitor.scala:16:26] reg in_flight_1; // @[Monitor.scala:16:26] reg in_flight_2; // @[Monitor.scala:16:26] reg in_flight_3; // @[Monitor.scala:16:26] reg in_flight_4; // @[Monitor.scala:16:26] wire _GEN = io_in_flit_0_bits_virt_channel_id == 3'h0; // @[Monitor.scala:21:46] wire _GEN_0 = io_in_flit_0_bits_virt_channel_id == 3'h2; // @[Monitor.scala:21:46] wire _GEN_1 = io_in_flit_0_bits_virt_channel_id == 3'h3; // @[Monitor.scala:21:46] wire _GEN_2 = io_in_flit_0_bits_virt_channel_id == 3'h4; // @[Monitor.scala:21:46]
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed version of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message " + "is already pending (not received until current cycle) for a prior request message " + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the " + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the " + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be the same as a_size of the corresponding request " + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
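// A worked illustration of the per-source packing updated below (the numbers are only an
// example, not taken from any particular configuration): with a_opcode_bus_size = 4, a
// request from source 2 with opcode Get (4) records (4 << 1) | 1 = 9 into bits [11:8] of
// inflight_opcodes; the D-side lookup shifts right by (source << log_a_opcode_bus_size),
// masks out the 4-bit slot, and drops the low "occupied" bit to recover opcode 4. Sizes
// are tracked the same way in inflight_sizes, using a_size_bus_size-wide slots.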
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s from high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalone diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there exist C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there exist D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? 
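// (On the D channel only AccessAckData and GrantData carry a payload; AccessAck, HintAck,
//  Grant and ReleaseAck are header-only. The two flags here are a conservative reading of
//  which of those groups this edge can ever produce.)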
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x 
match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! 
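// A hedged usage sketch (every name other than inFlight itself is hypothetical): the pair
// of counts returned by this method can back an end-of-test quiescence check, e.g.
//   val (flight, next_flight) = edge.inFlight(tlBundle)
//   assert(!testDone || flight === 0.U, "TileLink transactions still in flight")
// The cost comes from the per-channel first/last trackers plus two PopCounts every cycle.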
def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare 
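// A permissions-only Release carries no payload, so data is left as DontCare and corrupt is
// tied low below; as with the other constructors in this class, the returned pair is
// (legal, bits), where 'legal' is the manager capability check computed above.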
c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, 
mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := 
DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: 
Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, 
data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
module TLMonitor_41( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg [8:0] a_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [3:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] reg [8:0] d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg [3:0] source_1; // @[Monitor.scala:541:22] reg [2:0] sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [15:0] inflight; // @[Monitor.scala:614:27] reg [63:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [127:0] inflight_sizes; // @[Monitor.scala:618:33] reg [8:0] a_first_counter_1; // @[Edges.scala:229:27] wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] reg [8:0] d_first_counter_1; // @[Edges.scala:229:27] wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire [15:0] _GEN = {12'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35] wire _GEN_0 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35] wire _GEN_1 = io_in_d_valid & d_first_1; // @[Monitor.scala:674:26] wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74] wire _GEN_3 = _GEN_1 & _GEN_2; // @[Monitor.scala:673:46, :674:{26,71,74}] wire [15:0] _GEN_4 = {12'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [15:0] inflight_1; // @[Monitor.scala:726:35] reg [127:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg [8:0] d_first_counter_2; // @[Edges.scala:229:27] wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _GEN_5 = io_in_d_valid & d_first_2 & io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46, :784:26, :788:70] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files. File RoundAnyRawFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util.Fill import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundAnyRawFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int, options: Int ) extends RawModule { override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(inExpWidth, inSigWidth)) // (allowed exponent range has limits) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0) val effectiveInSigWidth = if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1 val neverUnderflows = ((options & (flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact) ) != 0) || (inExpWidth < outExpWidth) val neverOverflows = ((options & flRoundOpt_neverOverflows) != 0) || (inExpWidth < outExpWidth) val outNaNExp = BigInt(7)<<(outExpWidth - 2) val outInfExp = BigInt(6)<<(outExpWidth - 2) val outMaxFiniteExp = outInfExp - 1 val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2 val outMinNonzeroExp = outMinNormExp - outSigWidth + 1 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundingMode_near_even = (io.roundingMode === round_near_even) val roundingMode_minMag = (io.roundingMode === round_minMag) val roundingMode_min = (io.roundingMode === round_min) val roundingMode_max = (io.roundingMode === round_max) val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag) val roundingMode_odd = (io.roundingMode === round_odd) val roundMagUp = (roundingMode_min && io.in.sign) || (roundingMode_max && ! 
io.in.sign) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sAdjustedExp = if (inExpWidth < outExpWidth) (io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S )(outExpWidth, 0).zext else if (inExpWidth == outExpWidth) io.in.sExp else io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S val adjustedSig = if (inSigWidth <= outSigWidth + 2) io.in.sig<<(outSigWidth - inSigWidth + 2) else (io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ## io.in.sig(inSigWidth - outSigWidth - 2, 0).orR ) val doShiftSigDown1 = if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2) val common_expOut = Wire(UInt((outExpWidth + 1).W)) val common_fractOut = Wire(UInt((outSigWidth - 1).W)) val common_overflow = Wire(Bool()) val common_totalUnderflow = Wire(Bool()) val common_underflow = Wire(Bool()) val common_inexact = Wire(Bool()) if ( neverOverflows && neverUnderflows && (effectiveInSigWidth <= outSigWidth) ) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1 common_fractOut := Mux(doShiftSigDown1, adjustedSig(outSigWidth + 1, 3), adjustedSig(outSigWidth, 2) ) common_overflow := false.B common_totalUnderflow := false.B common_underflow := false.B common_inexact := false.B } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundMask = if (neverUnderflows) 0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W) else (lowMask( sAdjustedExp(outExpWidth, 0), outMinNormExp - outSigWidth - 1, outMinNormExp ) | doShiftSigDown1) ## 3.U(2.W) val shiftedRoundMask = 0.U(1.W) ## roundMask>>1 val roundPosMask = ~shiftedRoundMask & roundMask val roundPosBit = (adjustedSig & roundPosMask).orR val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR val anyRound = roundPosBit || anyRoundExtra val roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && roundPosBit) || (roundMagUp && anyRound) val roundedSig: Bits = Mux(roundIncr, (((adjustedSig | roundMask)>>2) +& 1.U) & ~Mux(roundingMode_near_even && roundPosBit && ! anyRoundExtra, roundMask>>1, 0.U((outSigWidth + 2).W) ), (adjustedSig & ~roundMask)>>2 | Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U) ) //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? 
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext common_expOut := sRoundedExp(outExpWidth, 0) common_fractOut := Mux(doShiftSigDown1, roundedSig(outSigWidth - 1, 1), roundedSig(outSigWidth - 2, 0) ) common_overflow := (if (neverOverflows) false.B else //*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?: (sRoundedExp>>(outExpWidth - 1) >= 3.S)) common_totalUnderflow := (if (neverUnderflows) false.B else //*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?: (sRoundedExp < outMinNonzeroExp.S)) val unboundedRange_roundPosBit = Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1)) val unboundedRange_anyRound = (doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR val unboundedRange_roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && unboundedRange_roundPosBit) || (roundMagUp && unboundedRange_anyRound) val roundCarry = Mux(doShiftSigDown1, roundedSig(outSigWidth + 1), roundedSig(outSigWidth) ) common_underflow := (if (neverUnderflows) false.B else common_totalUnderflow || //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? (anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) && Mux(doShiftSigDown1, roundMask(3), roundMask(2)) && ! ((io.detectTininess === tininess_afterRounding) && ! Mux(doShiftSigDown1, roundMask(4), roundMask(3) ) && roundCarry && roundPosBit && unboundedRange_roundIncr))) common_inexact := common_totalUnderflow || anyRound } //------------------------------------------------------------------------ //------------------------------------------------------------------------ val isNaNOut = io.invalidExc || io.in.isNaN val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero val overflow = commonCase && common_overflow val underflow = commonCase && common_underflow val inexact = overflow || (commonCase && common_inexact) val overflow_roundMagUp = roundingMode_near_even || roundingMode_near_maxMag || roundMagUp val pegMinNonzeroMagOut = commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd) val pegMaxFiniteMagOut = overflow && ! 
overflow_roundMagUp val notNaN_isInfOut = notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp) val signOut = Mux(isNaNOut, false.B, io.in.sign) val expOut = (common_expOut & ~Mux(io.in.isZero || common_totalUnderflow, (BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMinNonzeroMagOut, ~outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMaxFiniteMagOut, (BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W), 0.U ) & ~Mux(notNaN_isInfOut, (BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U )) | Mux(pegMinNonzeroMagOut, outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) | Mux(pegMaxFiniteMagOut, outMaxFiniteExp.U((outExpWidth + 1).W), 0.U ) | Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) | Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U) val fractOut = Mux(isNaNOut || io.in.isZero || common_totalUnderflow, Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U), common_fractOut ) | Fill(outSigWidth - 1, pegMaxFiniteMagOut) io.out := signOut ## expOut ## fractOut io.exceptionFlags := io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int) extends RawModule { override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(expWidth, sigWidth + 2)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( expWidth, sigWidth + 2, expWidth, sigWidth, options)) roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc roundAnyRawFNToRecFN.io.in := io.in roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags }
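As a usage sketch (an assumption for illustration, not part of the hardfloat source), the wrapper below rounds a raw single-precision value (expWidth = 8, sigWidth = 24) through RoundRawFNToRecFN with constant rounding controls. Driving constants is what lets the tools fold io_roundingMode and io_detectTininess away, which appears to be why the specialized Verilog module below has them hard-wired to 3'h0 and 1'h1. The module name RoundF32Example is hypothetical.

import chisel3._
import hardfloat._
import hardfloat.consts._

class RoundF32Example extends Module {
  val io = IO(new Bundle {
    val in    = Input(new RawFloat(8, 26))  // sigWidth + 2, per RoundRawFNToRecFN
    val out   = Output(Bits(33.W))          // expWidth + sigWidth + 1 recoded bits
    val flags = Output(Bits(5.W))
  })
  val rounder = Module(new RoundRawFNToRecFN(expWidth = 8, sigWidth = 24, options = 0))
  rounder.io.invalidExc     := false.B
  rounder.io.infiniteExc    := false.B
  rounder.io.in             := io.in
  rounder.io.roundingMode   := round_near_even         // constant; may fold to 0
  rounder.io.detectTininess := tininess_afterRounding  // constant; may fold to 1
  io.out   := rounder.io.out
  io.flags := rounder.io.exceptionFlags
}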
module RoundRawFNToRecFN_e8_s24_9( // @[RoundAnyRawFNToRecFN.scala:295:5]
  input         io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:299:16]
  input         io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:299:16]
  input         io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:299:16]
  input         io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:299:16]
  input         io_in_sign, // @[RoundAnyRawFNToRecFN.scala:299:16]
  input  [9:0]  io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:299:16]
  input  [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:299:16]
  output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:299:16]
  output [4:0]  io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:299:16]
);

  wire        io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:295:5]
  wire        io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:295:5]
  wire        io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:295:5]
  wire        io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:295:5]
  wire        io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:295:5]
  wire [9:0]  io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:295:5]
  wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:295:5]
  wire        io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
  wire [2:0]  io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
  wire        io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
  wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
  wire [4:0]  io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
  RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_9 roundAnyRawFNToRecFN ( // @[RoundAnyRawFNToRecFN.scala:310:15]
    .io_invalidExc     (io_invalidExc_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
    .io_in_isNaN       (io_in_isNaN_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
    .io_in_isInf       (io_in_isInf_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
    .io_in_isZero      (io_in_isZero_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
    .io_in_sign        (io_in_sign_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
    .io_in_sExp        (io_in_sExp_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
    .io_in_sig         (io_in_sig_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
    .io_out            (io_out_0),
    .io_exceptionFlags (io_exceptionFlags_0)
  ); // @[RoundAnyRawFNToRecFN.scala:310:15]
  assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
  assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File Tile.scala:
// See README.md for license details.

package gemmini

import chisel3._
import chisel3.util._
import Util._

/**
  * A Tile is a purely combinational 2D array of pass-through PEs.
  * The 'a' inputs are broadcast horizontally; the 'b', 'd', control, id, last, and valid
  * inputs are broadcast vertically. Each is also passed through to the Tile's outputs.
  * @param inputType The data type of the PE inputs
  * @param rows Number of rows of PEs in the Tile
  * @param columns Number of columns of PEs in the Tile
  */
class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module {
  val io = IO(new Bundle {
    val in_a = Input(Vec(rows, inputType))
    val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it
    val in_d = Input(Vec(columns, outputType))
    val in_control = Input(Vec(columns, new PEControl(accType)))
    val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
    val in_last = Input(Vec(columns, Bool()))

    val out_a = Output(Vec(rows, inputType))
    val out_c = Output(Vec(columns, outputType))
    val out_b = Output(Vec(columns, outputType))
    val out_control = Output(Vec(columns, new PEControl(accType)))
    val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
    val out_last = Output(Vec(columns, Bool()))

    val in_valid = Input(Vec(columns, Bool()))
    val out_valid = Output(Vec(columns, Bool()))

    val bad_dataflow = Output(Bool())
  })

  import ev._

  val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls)))
  val tileT = tile.transpose

  // TODO: abstract hori/vert broadcast, all these connections look the same
  // Broadcast 'a' horizontally across the Tile
  for (r <- 0 until rows) {
    tile(r).foldLeft(io.in_a(r)) {
      case (in_a, pe) =>
        pe.io.in_a := in_a
        pe.io.out_a
    }
  }

  // Broadcast 'b' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_b(c)) {
      case (in_b, pe) =>
        pe.io.in_b := (if (tree_reduction) in_b.zero else in_b)
        pe.io.out_b
    }
  }

  // Broadcast 'd' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_d(c)) {
      case (in_d, pe) =>
        pe.io.in_d := in_d
        pe.io.out_c
    }
  }

  // Broadcast 'control' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_control(c)) {
      case (in_ctrl, pe) =>
        pe.io.in_control := in_ctrl
        pe.io.out_control
    }
  }

  // Broadcast 'valid' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_valid(c)) {
      case (v, pe) =>
        pe.io.in_valid := v
        pe.io.out_valid
    }
  }

  // Broadcast 'id' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_id(c)) {
      case (id, pe) =>
        pe.io.in_id := id
        pe.io.out_id
    }
  }

  // Broadcast 'last' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_last(c)) {
      case (last, pe) =>
        pe.io.in_last := last
        pe.io.out_last
    }
  }

  // Drive the Tile's bottom IO
  for (c <- 0 until columns) {
    io.out_c(c) := tile(rows-1)(c).io.out_c
    io.out_control(c) := tile(rows-1)(c).io.out_control
    io.out_id(c) := tile(rows-1)(c).io.out_id
    io.out_last(c) := tile(rows-1)(c).io.out_last
    io.out_valid(c) := tile(rows-1)(c).io.out_valid

    io.out_b(c) := {
      if (tree_reduction) {
        val prods = tileT(c).map(_.io.out_b)
        accumulateTree(prods :+ io.in_b(c))
      } else {
        tile(rows - 1)(c).io.out_b
      }
    }
  }

  io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_)

  // Drive the Tile's right IO
  for (r <- 0 until rows) {
    io.out_a(r) := tile(r)(columns-1).io.out_a
  }
}
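A minimal instantiation sketch follows; it is an assumption for illustration, not part of gemmini. It builds a 1x1 Tile (a single PE, like the Tile_15 module below, though that instance was elaborated with a different element type). The wrapper name OnePETile is hypothetical, the implicit Arithmetic[SInt] instance is assumed to be supplied by gemmini's Arithmetic definitions, and the control-field encodings (dataflow, propagate, shift) follow the port names visible in the generated Verilog.

import chisel3._
import chisel3.util._
import gemmini._

class OnePETile(implicit ev: Arithmetic[SInt]) extends Module {
  val io = IO(new Bundle {
    val a   = Input(SInt(8.W))
    val b   = Input(SInt(32.W))
    val d   = Input(SInt(32.W))
    val out = Output(SInt(32.W))
  })
  val tile = Module(new Tile(SInt(8.W), SInt(32.W), SInt(32.W),
    Dataflow.BOTH, tree_reduction = false, max_simultaneous_matmuls = 16,
    rows = 1, columns = 1))
  tile.io.in_a(0) := io.a
  tile.io.in_b(0) := io.b
  tile.io.in_d(0) := io.d
  tile.io.in_control(0).dataflow  := 0.U      // encoding per gemmini's Dataflow enum (assumption)
  tile.io.in_control(0).propagate := false.B
  tile.io.in_control(0).shift     := 0.U
  tile.io.in_id(0)    := 0.U
  tile.io.in_last(0)  := false.B
  tile.io.in_valid(0) := true.B
  io.out := tile.io.out_c(0)
}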
module Tile_15( // @[Tile.scala:16:7] input clock, // @[Tile.scala:16:7] input reset, // @[Tile.scala:16:7] input [31:0] io_in_a_0_bits, // @[Tile.scala:17:14] input [31:0] io_in_b_0_bits, // @[Tile.scala:17:14] input [31:0] io_in_d_0_bits, // @[Tile.scala:17:14] input io_in_control_0_dataflow, // @[Tile.scala:17:14] input io_in_control_0_propagate, // @[Tile.scala:17:14] input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14] input [3:0] io_in_id_0, // @[Tile.scala:17:14] input io_in_last_0, // @[Tile.scala:17:14] output [31:0] io_out_c_0_bits, // @[Tile.scala:17:14] output [31:0] io_out_b_0_bits, // @[Tile.scala:17:14] output io_out_control_0_dataflow, // @[Tile.scala:17:14] output io_out_control_0_propagate, // @[Tile.scala:17:14] output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14] output [3:0] io_out_id_0, // @[Tile.scala:17:14] output io_out_last_0, // @[Tile.scala:17:14] input io_in_valid_0, // @[Tile.scala:17:14] output io_out_valid_0, // @[Tile.scala:17:14] output io_bad_dataflow // @[Tile.scala:17:14] ); wire [31:0] io_in_a_0_bits_0 = io_in_a_0_bits; // @[Tile.scala:16:7] wire [31:0] io_in_b_0_bits_0 = io_in_b_0_bits; // @[Tile.scala:16:7] wire [31:0] io_in_d_0_bits_0 = io_in_d_0_bits; // @[Tile.scala:16:7] wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7] wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7] wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7] wire [3:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7] wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7] wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7] wire [31:0] io_out_a_0_bits; // @[Tile.scala:16:7] wire [31:0] io_out_c_0_bits_0; // @[Tile.scala:16:7] wire [31:0] io_out_b_0_bits_0; // @[Tile.scala:16:7] wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7] wire io_out_control_0_propagate_0; // @[Tile.scala:16:7] wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7] wire [3:0] io_out_id_0_0; // @[Tile.scala:16:7] wire io_out_last_0_0; // @[Tile.scala:16:7] wire io_out_valid_0_0; // @[Tile.scala:16:7] wire io_bad_dataflow_0; // @[Tile.scala:16:7] PE_31 tile_0_0 ( // @[Tile.scala:42:44] .clock (clock), .reset (reset), .io_in_a_bits (io_in_a_0_bits_0), // @[Tile.scala:16:7] .io_in_b_bits (io_in_b_0_bits_0), // @[Tile.scala:16:7] .io_in_d_bits (io_in_d_0_bits_0), // @[Tile.scala:16:7] .io_out_a_bits (io_out_a_0_bits), .io_out_b_bits (io_out_b_0_bits_0), .io_out_c_bits (io_out_c_0_bits_0), .io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7] .io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7] .io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7] .io_out_control_dataflow (io_out_control_0_dataflow_0), .io_out_control_propagate (io_out_control_0_propagate_0), .io_out_control_shift (io_out_control_0_shift_0), .io_in_id (io_in_id_0_0), // @[Tile.scala:16:7] .io_out_id (io_out_id_0_0), .io_in_last (io_in_last_0_0), // @[Tile.scala:16:7] .io_out_last (io_out_last_0_0), .io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7] .io_out_valid (io_out_valid_0_0), .io_bad_dataflow (io_bad_dataflow_0) ); // @[Tile.scala:42:44] assign io_out_c_0_bits = io_out_c_0_bits_0; // @[Tile.scala:16:7] assign io_out_b_0_bits = io_out_b_0_bits_0; // @[Tile.scala:16:7] assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7] assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7] assign 
io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7] assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7] assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7] assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7] assign io_bad_dataflow = io_bad_dataflow_0; // @[Tile.scala:16:7] endmodule
Generate the Verilog code corresponding to the following Chisel files.
File IOHandler.scala:
package shuttle.dmem

import chisel3._
import chisel3.util._
import chisel3.experimental.dataview._

import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.rocket._

class IOHandler(id: Int)(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
  val io = IO(new Bundle {
    val req = Flipped(Decoupled(new ShuttleDMemReq))
    val resp = Decoupled(new ShuttleDMemResp)
    val mem_access = Decoupled(new TLBundleA(edge.bundle))
    val mem_ack = Flipped(Valid(new TLBundleD(edge.bundle)))
    val replay_next = Output(Bool())
    val store_pending = Output(Bool())
  })

  def beatOffset(addr: UInt) = addr.extract(beatOffBits - 1, wordOffBits)

  def wordFromBeat(addr: UInt, dat: UInt) = {
    val shift = Cat(beatOffset(addr), 0.U((wordOffBits + log2Up(wordBytes)).W))
    (dat >> shift)(wordBits - 1, 0)
  }

  val req = Reg(new ShuttleDMemReq)
  val grant_word = Reg(UInt(wordBits.W))

  val s_idle :: s_mem_access :: s_mem_ack :: s_resp :: Nil = Enum(4)
  val state = RegInit(s_idle)
  io.req.ready := (state === s_idle)

  val loadgen = new LoadGen(req.size, req.signed, req.addr, grant_word, false.B, wordBytes)

  val a_source = id.U
  val a_address = req.addr
  val a_size = req.size
  val a_data = Fill(beatWords, req.data)

  val get = edge.Get(a_source, a_address, a_size)._2
  val put = edge.Put(a_source, a_address, a_size, a_data)._2
  val atomics = if (edge.manager.anySupportLogical) {
    MuxLookup(req.cmd, (0.U).asTypeOf(new TLBundleA(edge.bundle)))(Array(
      M_XA_SWAP -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.SWAP)._2,
      M_XA_XOR  -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.XOR)._2,
      M_XA_OR   -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.OR)._2,
      M_XA_AND  -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.AND)._2,
      M_XA_ADD  -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.ADD)._2,
      M_XA_MIN  -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MIN)._2,
      M_XA_MAX  -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MAX)._2,
      M_XA_MINU -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MINU)._2,
      M_XA_MAXU -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MAXU)._2))
  } else {
    // If no managers support atomics, assert fail if processor asks for them
    assert(state === s_idle || !isAMO(req.cmd))
    (0.U).asTypeOf(new TLBundleA(edge.bundle))
  }
  assert(state === s_idle || req.cmd =/= M_XSC)

  io.mem_access.valid := (state === s_mem_access)
  io.mem_access.bits := Mux(isAMO(req.cmd), atomics, Mux(isRead(req.cmd), get, put))

  io.replay_next := (state === s_mem_ack) || io.resp.valid && !io.resp.ready

  io.resp.valid := (state === s_resp)
  io.resp.bits.tag := req.tag
  io.resp.bits.has_data := isRead(req.cmd)
  io.resp.bits.data := loadgen.data
  io.resp.bits.size := req.size

  io.store_pending := state =/= s_idle && isWrite(req.cmd)

  when (io.req.fire) {
    req := io.req.bits
    state := s_mem_access
  }

  when (io.mem_access.fire) {
    state := s_mem_ack
  }

  when (state === s_mem_ack && io.mem_ack.valid) {
    state := s_resp
    when (isRead(req.cmd)) {
      grant_word := wordFromBeat(req.addr, io.mem_ack.bits.data)
    }
  }

  when (io.resp.fire) {
    state := s_idle
  }
}
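The control flow above is a simple four-state sequence: accept one request, issue a single TileLink A beat, wait for the D acknowledgement, then return the response. The self-contained sketch below distils just that state machine (it is not the shuttle code itself; the module name FourStateHandshake and the simplified ports are assumptions for illustration).

import chisel3._
import chisel3.util._

class FourStateHandshake extends Module {
  val io = IO(new Bundle {
    val req  = Flipped(Decoupled(UInt(8.W)))
    val mem  = Decoupled(UInt(8.W))  // stands in for io.mem_access (A channel)
    val ack  = Input(Bool())         // stands in for io.mem_ack.valid (D channel)
    val resp = Decoupled(UInt(8.W))
  })
  val s_idle :: s_mem_access :: s_mem_ack :: s_resp :: Nil = Enum(4)
  val state = RegInit(s_idle)
  val req   = Reg(UInt(8.W))

  io.req.ready  := state === s_idle        // only one request outstanding at a time
  io.mem.valid  := state === s_mem_access
  io.mem.bits   := req
  io.resp.valid := state === s_resp
  io.resp.bits  := req

  when (io.req.fire)                   { req := io.req.bits; state := s_mem_access }
  when (io.mem.fire)                   { state := s_mem_ack }
  when (state === s_mem_ack && io.ack) { state := s_resp }
  when (io.resp.fire)                  { state := s_idle }
}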
module IOHandler( // @[IOHandler.scala:13:7] input clock, // @[IOHandler.scala:13:7] input reset, // @[IOHandler.scala:13:7] output io_req_ready, // @[IOHandler.scala:14:14] input io_req_valid, // @[IOHandler.scala:14:14] input [39:0] io_req_bits_addr, // @[IOHandler.scala:14:14] input [6:0] io_req_bits_tag, // @[IOHandler.scala:14:14] input [4:0] io_req_bits_cmd, // @[IOHandler.scala:14:14] input [1:0] io_req_bits_size, // @[IOHandler.scala:14:14] input io_req_bits_signed, // @[IOHandler.scala:14:14] input [63:0] io_req_bits_data, // @[IOHandler.scala:14:14] input io_resp_ready, // @[IOHandler.scala:14:14] output io_resp_valid, // @[IOHandler.scala:14:14] output io_resp_bits_has_data, // @[IOHandler.scala:14:14] output [6:0] io_resp_bits_tag, // @[IOHandler.scala:14:14] output [63:0] io_resp_bits_data, // @[IOHandler.scala:14:14] output [1:0] io_resp_bits_size, // @[IOHandler.scala:14:14] input io_mem_access_ready, // @[IOHandler.scala:14:14] output io_mem_access_valid, // @[IOHandler.scala:14:14] output [2:0] io_mem_access_bits_opcode, // @[IOHandler.scala:14:14] output [2:0] io_mem_access_bits_param, // @[IOHandler.scala:14:14] output [3:0] io_mem_access_bits_size, // @[IOHandler.scala:14:14] output [2:0] io_mem_access_bits_source, // @[IOHandler.scala:14:14] output [31:0] io_mem_access_bits_address, // @[IOHandler.scala:14:14] output [7:0] io_mem_access_bits_mask, // @[IOHandler.scala:14:14] output [63:0] io_mem_access_bits_data, // @[IOHandler.scala:14:14] input io_mem_ack_valid, // @[IOHandler.scala:14:14] input [63:0] io_mem_ack_bits_data, // @[IOHandler.scala:14:14] output io_store_pending // @[IOHandler.scala:14:14] ); reg [39:0] req_addr; // @[IOHandler.scala:30:16] reg [6:0] req_tag; // @[IOHandler.scala:30:16] reg [4:0] req_cmd; // @[IOHandler.scala:30:16] reg [1:0] req_size; // @[IOHandler.scala:30:16] reg req_signed; // @[IOHandler.scala:30:16] reg [63:0] req_data; // @[IOHandler.scala:30:16] reg [63:0] grant_word; // @[IOHandler.scala:31:23] reg [1:0] state; // @[IOHandler.scala:34:22] wire io_req_ready_0 = state == 2'h0; // @[IOHandler.scala:34:22, :35:26] wire get_a_mask_sub_sub_size = req_size == 2'h2; // @[IOHandler.scala:30:16] wire get_a_mask_sub_sub_0_1 = (&req_size) | get_a_mask_sub_sub_size & ~(req_addr[2]); // @[IOHandler.scala:30:16] wire get_a_mask_sub_sub_1_1 = (&req_size) | get_a_mask_sub_sub_size & req_addr[2]; // @[IOHandler.scala:30:16] wire get_a_mask_sub_size = req_size == 2'h1; // @[IOHandler.scala:30:16] wire get_a_mask_sub_0_2 = ~(req_addr[2]) & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire get_a_mask_sub_0_1 = get_a_mask_sub_sub_0_1 | get_a_mask_sub_size & get_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire get_a_mask_sub_1_2 = ~(req_addr[2]) & req_addr[1]; // @[IOHandler.scala:30:16] wire get_a_mask_sub_1_1 = get_a_mask_sub_sub_0_1 | get_a_mask_sub_size & get_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire get_a_mask_sub_2_2 = req_addr[2] & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire get_a_mask_sub_2_1 = get_a_mask_sub_sub_1_1 | get_a_mask_sub_size & get_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire get_a_mask_sub_3_2 = req_addr[2] & req_addr[1]; // @[IOHandler.scala:30:16] wire get_a_mask_sub_3_1 = get_a_mask_sub_sub_1_1 | get_a_mask_sub_size & get_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire put_a_mask_sub_sub_size = req_size == 2'h2; // @[IOHandler.scala:30:16] wire put_a_mask_sub_sub_0_1 = (&req_size) | put_a_mask_sub_sub_size & ~(req_addr[2]); // 
@[IOHandler.scala:30:16] wire put_a_mask_sub_sub_1_1 = (&req_size) | put_a_mask_sub_sub_size & req_addr[2]; // @[IOHandler.scala:30:16] wire put_a_mask_sub_size = req_size == 2'h1; // @[IOHandler.scala:30:16] wire put_a_mask_sub_0_2 = ~(req_addr[2]) & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire put_a_mask_sub_0_1 = put_a_mask_sub_sub_0_1 | put_a_mask_sub_size & put_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire put_a_mask_sub_1_2 = ~(req_addr[2]) & req_addr[1]; // @[IOHandler.scala:30:16] wire put_a_mask_sub_1_1 = put_a_mask_sub_sub_0_1 | put_a_mask_sub_size & put_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire put_a_mask_sub_2_2 = req_addr[2] & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire put_a_mask_sub_2_1 = put_a_mask_sub_sub_1_1 | put_a_mask_sub_size & put_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire put_a_mask_sub_3_2 = req_addr[2] & req_addr[1]; // @[IOHandler.scala:30:16] wire put_a_mask_sub_3_1 = put_a_mask_sub_sub_1_1 | put_a_mask_sub_size & put_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size = req_size == 2'h2; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_0_1 = (&req_size) | atomics_a_mask_sub_sub_size & ~(req_addr[2]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_1_1 = (&req_size) | atomics_a_mask_sub_sub_size & req_addr[2]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_size = req_size == 2'h1; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_2 = ~(req_addr[2]) & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_1 = atomics_a_mask_sub_sub_0_1 | atomics_a_mask_sub_size & atomics_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2 = ~(req_addr[2]) & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_1_1 = atomics_a_mask_sub_sub_0_1 | atomics_a_mask_sub_size & atomics_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2 = req_addr[2] & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_2_1 = atomics_a_mask_sub_sub_1_1 | atomics_a_mask_sub_size & atomics_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2 = req_addr[2] & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_3_1 = atomics_a_mask_sub_sub_1_1 | atomics_a_mask_sub_size & atomics_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size_1 = req_size == 2'h2; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_0_1_1 = (&req_size) | atomics_a_mask_sub_sub_size_1 & ~(req_addr[2]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_1_1_1 = (&req_size) | atomics_a_mask_sub_sub_size_1 & req_addr[2]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_size_1 = req_size == 2'h1; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_2_1 = ~(req_addr[2]) & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_1_1 = atomics_a_mask_sub_sub_0_1_1 | atomics_a_mask_sub_size_1 & atomics_a_mask_sub_0_2_1; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2_1 = ~(req_addr[2]) & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_1_1_1 = atomics_a_mask_sub_sub_0_1_1 | atomics_a_mask_sub_size_1 & atomics_a_mask_sub_1_2_1; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2_1 = req_addr[2] & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_2_1_1 = atomics_a_mask_sub_sub_1_1_1 | 
atomics_a_mask_sub_size_1 & atomics_a_mask_sub_2_2_1; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2_1 = req_addr[2] & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_3_1_1 = atomics_a_mask_sub_sub_1_1_1 | atomics_a_mask_sub_size_1 & atomics_a_mask_sub_3_2_1; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size_2 = req_size == 2'h2; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_0_1_2 = (&req_size) | atomics_a_mask_sub_sub_size_2 & ~(req_addr[2]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_1_1_2 = (&req_size) | atomics_a_mask_sub_sub_size_2 & req_addr[2]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_size_2 = req_size == 2'h1; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_2_2 = ~(req_addr[2]) & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_1_2 = atomics_a_mask_sub_sub_0_1_2 | atomics_a_mask_sub_size_2 & atomics_a_mask_sub_0_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2_2 = ~(req_addr[2]) & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_1_1_2 = atomics_a_mask_sub_sub_0_1_2 | atomics_a_mask_sub_size_2 & atomics_a_mask_sub_1_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2_2 = req_addr[2] & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_2_1_2 = atomics_a_mask_sub_sub_1_1_2 | atomics_a_mask_sub_size_2 & atomics_a_mask_sub_2_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2_2 = req_addr[2] & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_3_1_2 = atomics_a_mask_sub_sub_1_1_2 | atomics_a_mask_sub_size_2 & atomics_a_mask_sub_3_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size_3 = req_size == 2'h2; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_0_1_3 = (&req_size) | atomics_a_mask_sub_sub_size_3 & ~(req_addr[2]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_1_1_3 = (&req_size) | atomics_a_mask_sub_sub_size_3 & req_addr[2]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_size_3 = req_size == 2'h1; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_2_3 = ~(req_addr[2]) & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_1_3 = atomics_a_mask_sub_sub_0_1_3 | atomics_a_mask_sub_size_3 & atomics_a_mask_sub_0_2_3; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2_3 = ~(req_addr[2]) & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_1_1_3 = atomics_a_mask_sub_sub_0_1_3 | atomics_a_mask_sub_size_3 & atomics_a_mask_sub_1_2_3; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2_3 = req_addr[2] & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_2_1_3 = atomics_a_mask_sub_sub_1_1_3 | atomics_a_mask_sub_size_3 & atomics_a_mask_sub_2_2_3; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2_3 = req_addr[2] & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_3_1_3 = atomics_a_mask_sub_sub_1_1_3 | atomics_a_mask_sub_size_3 & atomics_a_mask_sub_3_2_3; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size_4 = req_size == 2'h2; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_0_1_4 = (&req_size) | atomics_a_mask_sub_sub_size_4 & ~(req_addr[2]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_1_1_4 = (&req_size) | atomics_a_mask_sub_sub_size_4 & req_addr[2]; // @[IOHandler.scala:30:16] wire 
atomics_a_mask_sub_size_4 = req_size == 2'h1; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_2_4 = ~(req_addr[2]) & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_1_4 = atomics_a_mask_sub_sub_0_1_4 | atomics_a_mask_sub_size_4 & atomics_a_mask_sub_0_2_4; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2_4 = ~(req_addr[2]) & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_1_1_4 = atomics_a_mask_sub_sub_0_1_4 | atomics_a_mask_sub_size_4 & atomics_a_mask_sub_1_2_4; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2_4 = req_addr[2] & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_2_1_4 = atomics_a_mask_sub_sub_1_1_4 | atomics_a_mask_sub_size_4 & atomics_a_mask_sub_2_2_4; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2_4 = req_addr[2] & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_3_1_4 = atomics_a_mask_sub_sub_1_1_4 | atomics_a_mask_sub_size_4 & atomics_a_mask_sub_3_2_4; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size_5 = req_size == 2'h2; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_0_1_5 = (&req_size) | atomics_a_mask_sub_sub_size_5 & ~(req_addr[2]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_1_1_5 = (&req_size) | atomics_a_mask_sub_sub_size_5 & req_addr[2]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_size_5 = req_size == 2'h1; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_2_5 = ~(req_addr[2]) & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_1_5 = atomics_a_mask_sub_sub_0_1_5 | atomics_a_mask_sub_size_5 & atomics_a_mask_sub_0_2_5; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2_5 = ~(req_addr[2]) & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_1_1_5 = atomics_a_mask_sub_sub_0_1_5 | atomics_a_mask_sub_size_5 & atomics_a_mask_sub_1_2_5; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2_5 = req_addr[2] & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_2_1_5 = atomics_a_mask_sub_sub_1_1_5 | atomics_a_mask_sub_size_5 & atomics_a_mask_sub_2_2_5; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2_5 = req_addr[2] & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_3_1_5 = atomics_a_mask_sub_sub_1_1_5 | atomics_a_mask_sub_size_5 & atomics_a_mask_sub_3_2_5; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size_6 = req_size == 2'h2; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_0_1_6 = (&req_size) | atomics_a_mask_sub_sub_size_6 & ~(req_addr[2]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_1_1_6 = (&req_size) | atomics_a_mask_sub_sub_size_6 & req_addr[2]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_size_6 = req_size == 2'h1; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_2_6 = ~(req_addr[2]) & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_1_6 = atomics_a_mask_sub_sub_0_1_6 | atomics_a_mask_sub_size_6 & atomics_a_mask_sub_0_2_6; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2_6 = ~(req_addr[2]) & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_1_1_6 = atomics_a_mask_sub_sub_0_1_6 | atomics_a_mask_sub_size_6 & atomics_a_mask_sub_1_2_6; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2_6 = req_addr[2] & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire 
atomics_a_mask_sub_2_1_6 = atomics_a_mask_sub_sub_1_1_6 | atomics_a_mask_sub_size_6 & atomics_a_mask_sub_2_2_6; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2_6 = req_addr[2] & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_3_1_6 = atomics_a_mask_sub_sub_1_1_6 | atomics_a_mask_sub_size_6 & atomics_a_mask_sub_3_2_6; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size_7 = req_size == 2'h2; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_0_1_7 = (&req_size) | atomics_a_mask_sub_sub_size_7 & ~(req_addr[2]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_1_1_7 = (&req_size) | atomics_a_mask_sub_sub_size_7 & req_addr[2]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_size_7 = req_size == 2'h1; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_2_7 = ~(req_addr[2]) & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_1_7 = atomics_a_mask_sub_sub_0_1_7 | atomics_a_mask_sub_size_7 & atomics_a_mask_sub_0_2_7; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2_7 = ~(req_addr[2]) & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_1_1_7 = atomics_a_mask_sub_sub_0_1_7 | atomics_a_mask_sub_size_7 & atomics_a_mask_sub_1_2_7; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2_7 = req_addr[2] & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_2_1_7 = atomics_a_mask_sub_sub_1_1_7 | atomics_a_mask_sub_size_7 & atomics_a_mask_sub_2_2_7; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2_7 = req_addr[2] & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_3_1_7 = atomics_a_mask_sub_sub_1_1_7 | atomics_a_mask_sub_size_7 & atomics_a_mask_sub_3_2_7; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size_8 = req_size == 2'h2; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_0_1_8 = (&req_size) | atomics_a_mask_sub_sub_size_8 & ~(req_addr[2]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_sub_1_1_8 = (&req_size) | atomics_a_mask_sub_sub_size_8 & req_addr[2]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_size_8 = req_size == 2'h1; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_2_8 = ~(req_addr[2]) & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_0_1_8 = atomics_a_mask_sub_sub_0_1_8 | atomics_a_mask_sub_size_8 & atomics_a_mask_sub_0_2_8; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2_8 = ~(req_addr[2]) & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_1_1_8 = atomics_a_mask_sub_sub_0_1_8 | atomics_a_mask_sub_size_8 & atomics_a_mask_sub_1_2_8; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2_8 = req_addr[2] & ~(req_addr[1]); // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_2_1_8 = atomics_a_mask_sub_sub_1_1_8 | atomics_a_mask_sub_size_8 & atomics_a_mask_sub_2_2_8; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2_8 = req_addr[2] & req_addr[1]; // @[IOHandler.scala:30:16] wire atomics_a_mask_sub_3_1_8 = atomics_a_mask_sub_sub_1_1_8 | atomics_a_mask_sub_size_8 & atomics_a_mask_sub_3_2_8; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire _io_store_pending_T_6 = req_cmd == 5'h4; // @[IOHandler.scala:30:16, :47:67] wire _io_store_pending_T_7 = req_cmd == 5'h9; // @[IOHandler.scala:30:16, :47:67] wire _io_store_pending_T_8 = req_cmd == 5'hA; // @[IOHandler.scala:30:16, :47:67] wire _io_store_pending_T_9 = req_cmd == 
5'hB; // @[IOHandler.scala:30:16, :47:67] wire _GEN = _io_store_pending_T_9 | _io_store_pending_T_8 | _io_store_pending_T_7 | _io_store_pending_T_6; // @[IOHandler.scala:47:67] wire _io_store_pending_T_13 = req_cmd == 5'h8; // @[IOHandler.scala:30:16, :47:67] wire _io_store_pending_T_14 = req_cmd == 5'hC; // @[IOHandler.scala:30:16, :47:67] wire _io_store_pending_T_15 = req_cmd == 5'hD; // @[IOHandler.scala:30:16, :47:67] wire _io_store_pending_T_16 = req_cmd == 5'hE; // @[IOHandler.scala:30:16, :47:67] wire _io_store_pending_T_17 = req_cmd == 5'hF; // @[IOHandler.scala:30:16, :47:67] wire _GEN_0 = _io_store_pending_T_17 | _io_store_pending_T_16 | _io_store_pending_T_15 | _io_store_pending_T_14 | _io_store_pending_T_13; // @[IOHandler.scala:47:67] wire [7:0] atomics_mask = _io_store_pending_T_17 ? {atomics_a_mask_sub_3_1_8 | atomics_a_mask_sub_3_2_8 & req_addr[0], atomics_a_mask_sub_3_1_8 | atomics_a_mask_sub_3_2_8 & ~(req_addr[0]), atomics_a_mask_sub_2_1_8 | atomics_a_mask_sub_2_2_8 & req_addr[0], atomics_a_mask_sub_2_1_8 | atomics_a_mask_sub_2_2_8 & ~(req_addr[0]), atomics_a_mask_sub_1_1_8 | atomics_a_mask_sub_1_2_8 & req_addr[0], atomics_a_mask_sub_1_1_8 | atomics_a_mask_sub_1_2_8 & ~(req_addr[0]), atomics_a_mask_sub_0_1_8 | atomics_a_mask_sub_0_2_8 & req_addr[0], atomics_a_mask_sub_0_1_8 | atomics_a_mask_sub_0_2_8 & ~(req_addr[0])} : _io_store_pending_T_16 ? {atomics_a_mask_sub_3_1_7 | atomics_a_mask_sub_3_2_7 & req_addr[0], atomics_a_mask_sub_3_1_7 | atomics_a_mask_sub_3_2_7 & ~(req_addr[0]), atomics_a_mask_sub_2_1_7 | atomics_a_mask_sub_2_2_7 & req_addr[0], atomics_a_mask_sub_2_1_7 | atomics_a_mask_sub_2_2_7 & ~(req_addr[0]), atomics_a_mask_sub_1_1_7 | atomics_a_mask_sub_1_2_7 & req_addr[0], atomics_a_mask_sub_1_1_7 | atomics_a_mask_sub_1_2_7 & ~(req_addr[0]), atomics_a_mask_sub_0_1_7 | atomics_a_mask_sub_0_2_7 & req_addr[0], atomics_a_mask_sub_0_1_7 | atomics_a_mask_sub_0_2_7 & ~(req_addr[0])} : _io_store_pending_T_15 ? {atomics_a_mask_sub_3_1_6 | atomics_a_mask_sub_3_2_6 & req_addr[0], atomics_a_mask_sub_3_1_6 | atomics_a_mask_sub_3_2_6 & ~(req_addr[0]), atomics_a_mask_sub_2_1_6 | atomics_a_mask_sub_2_2_6 & req_addr[0], atomics_a_mask_sub_2_1_6 | atomics_a_mask_sub_2_2_6 & ~(req_addr[0]), atomics_a_mask_sub_1_1_6 | atomics_a_mask_sub_1_2_6 & req_addr[0], atomics_a_mask_sub_1_1_6 | atomics_a_mask_sub_1_2_6 & ~(req_addr[0]), atomics_a_mask_sub_0_1_6 | atomics_a_mask_sub_0_2_6 & req_addr[0], atomics_a_mask_sub_0_1_6 | atomics_a_mask_sub_0_2_6 & ~(req_addr[0])} : _io_store_pending_T_14 ? {atomics_a_mask_sub_3_1_5 | atomics_a_mask_sub_3_2_5 & req_addr[0], atomics_a_mask_sub_3_1_5 | atomics_a_mask_sub_3_2_5 & ~(req_addr[0]), atomics_a_mask_sub_2_1_5 | atomics_a_mask_sub_2_2_5 & req_addr[0], atomics_a_mask_sub_2_1_5 | atomics_a_mask_sub_2_2_5 & ~(req_addr[0]), atomics_a_mask_sub_1_1_5 | atomics_a_mask_sub_1_2_5 & req_addr[0], atomics_a_mask_sub_1_1_5 | atomics_a_mask_sub_1_2_5 & ~(req_addr[0]), atomics_a_mask_sub_0_1_5 | atomics_a_mask_sub_0_2_5 & req_addr[0], atomics_a_mask_sub_0_1_5 | atomics_a_mask_sub_0_2_5 & ~(req_addr[0])} : _io_store_pending_T_13 ? 
{atomics_a_mask_sub_3_1_4 | atomics_a_mask_sub_3_2_4 & req_addr[0], atomics_a_mask_sub_3_1_4 | atomics_a_mask_sub_3_2_4 & ~(req_addr[0]), atomics_a_mask_sub_2_1_4 | atomics_a_mask_sub_2_2_4 & req_addr[0], atomics_a_mask_sub_2_1_4 | atomics_a_mask_sub_2_2_4 & ~(req_addr[0]), atomics_a_mask_sub_1_1_4 | atomics_a_mask_sub_1_2_4 & req_addr[0], atomics_a_mask_sub_1_1_4 | atomics_a_mask_sub_1_2_4 & ~(req_addr[0]), atomics_a_mask_sub_0_1_4 | atomics_a_mask_sub_0_2_4 & req_addr[0], atomics_a_mask_sub_0_1_4 | atomics_a_mask_sub_0_2_4 & ~(req_addr[0])} : _io_store_pending_T_9 ? {atomics_a_mask_sub_3_1_3 | atomics_a_mask_sub_3_2_3 & req_addr[0], atomics_a_mask_sub_3_1_3 | atomics_a_mask_sub_3_2_3 & ~(req_addr[0]), atomics_a_mask_sub_2_1_3 | atomics_a_mask_sub_2_2_3 & req_addr[0], atomics_a_mask_sub_2_1_3 | atomics_a_mask_sub_2_2_3 & ~(req_addr[0]), atomics_a_mask_sub_1_1_3 | atomics_a_mask_sub_1_2_3 & req_addr[0], atomics_a_mask_sub_1_1_3 | atomics_a_mask_sub_1_2_3 & ~(req_addr[0]), atomics_a_mask_sub_0_1_3 | atomics_a_mask_sub_0_2_3 & req_addr[0], atomics_a_mask_sub_0_1_3 | atomics_a_mask_sub_0_2_3 & ~(req_addr[0])} : _io_store_pending_T_8 ? {atomics_a_mask_sub_3_1_2 | atomics_a_mask_sub_3_2_2 & req_addr[0], atomics_a_mask_sub_3_1_2 | atomics_a_mask_sub_3_2_2 & ~(req_addr[0]), atomics_a_mask_sub_2_1_2 | atomics_a_mask_sub_2_2_2 & req_addr[0], atomics_a_mask_sub_2_1_2 | atomics_a_mask_sub_2_2_2 & ~(req_addr[0]), atomics_a_mask_sub_1_1_2 | atomics_a_mask_sub_1_2_2 & req_addr[0], atomics_a_mask_sub_1_1_2 | atomics_a_mask_sub_1_2_2 & ~(req_addr[0]), atomics_a_mask_sub_0_1_2 | atomics_a_mask_sub_0_2_2 & req_addr[0], atomics_a_mask_sub_0_1_2 | atomics_a_mask_sub_0_2_2 & ~(req_addr[0])} : _io_store_pending_T_7 ? {atomics_a_mask_sub_3_1_1 | atomics_a_mask_sub_3_2_1 & req_addr[0], atomics_a_mask_sub_3_1_1 | atomics_a_mask_sub_3_2_1 & ~(req_addr[0]), atomics_a_mask_sub_2_1_1 | atomics_a_mask_sub_2_2_1 & req_addr[0], atomics_a_mask_sub_2_1_1 | atomics_a_mask_sub_2_2_1 & ~(req_addr[0]), atomics_a_mask_sub_1_1_1 | atomics_a_mask_sub_1_2_1 & req_addr[0], atomics_a_mask_sub_1_1_1 | atomics_a_mask_sub_1_2_1 & ~(req_addr[0]), atomics_a_mask_sub_0_1_1 | atomics_a_mask_sub_0_2_1 & req_addr[0], atomics_a_mask_sub_0_1_1 | atomics_a_mask_sub_0_2_1 & ~(req_addr[0])} : _io_store_pending_T_6 ? {atomics_a_mask_sub_3_1 | atomics_a_mask_sub_3_2 & req_addr[0], atomics_a_mask_sub_3_1 | atomics_a_mask_sub_3_2 & ~(req_addr[0]), atomics_a_mask_sub_2_1 | atomics_a_mask_sub_2_2 & req_addr[0], atomics_a_mask_sub_2_1 | atomics_a_mask_sub_2_2 & ~(req_addr[0]), atomics_a_mask_sub_1_1 | atomics_a_mask_sub_1_2 & req_addr[0], atomics_a_mask_sub_1_1 | atomics_a_mask_sub_1_2 & ~(req_addr[0]), atomics_a_mask_sub_0_1 | atomics_a_mask_sub_0_2 & req_addr[0], atomics_a_mask_sub_0_1 | atomics_a_mask_sub_0_2 & ~(req_addr[0])} : 8'h0; // @[IOHandler.scala:30:16, :47:67]
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag }
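As a small, self-contained illustration of the utilities quoted above (not part of the original files), readAndHold from SeqMemToAugmentedSeqMem combines a SyncReadMem read with holdUnless so the read data stays stable once the enable drops; the module and signal names below are made up for the example.

import chisel3._
import freechips.rocketchip.util._

// Sketch only: hold a synchronous-read result after the enable goes low.
class HeldRead extends Module {
  val io = IO(new Bundle {
    val addr = Input(UInt(4.W))
    val en   = Input(Bool())
    val data = Output(UInt(8.W))
  })
  val mem = SyncReadMem(16, UInt(8.W))
  // Expands to: mem.read(io.addr, io.en) holdUnless RegNext(io.en)
  io.data := mem.readAndHold(io.addr, io.en)
}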
module OptimizationBarrier_TLBEntryData_214( // @[package.scala:267:30] input clock, // @[package.scala:267:30] input reset, // @[package.scala:267:30] input [19:0] io_x_ppn, // @[package.scala:268:18] input io_x_u, // @[package.scala:268:18] input io_x_g, // @[package.scala:268:18] input io_x_ae_ptw, // @[package.scala:268:18] input io_x_ae_final, // @[package.scala:268:18] input io_x_ae_stage2, // @[package.scala:268:18] input io_x_pf, // @[package.scala:268:18] input io_x_gf, // @[package.scala:268:18] input io_x_sw, // @[package.scala:268:18] input io_x_sx, // @[package.scala:268:18] input io_x_sr, // @[package.scala:268:18] input io_x_hw, // @[package.scala:268:18] input io_x_hx, // @[package.scala:268:18] input io_x_hr, // @[package.scala:268:18] input io_x_pw, // @[package.scala:268:18] input io_x_px, // @[package.scala:268:18] input io_x_pr, // @[package.scala:268:18] input io_x_ppp, // @[package.scala:268:18] input io_x_pal, // @[package.scala:268:18] input io_x_paa, // @[package.scala:268:18] input io_x_eff, // @[package.scala:268:18] input io_x_c, // @[package.scala:268:18] input io_x_fragmented_superpage, // @[package.scala:268:18] output [19:0] io_y_ppn, // @[package.scala:268:18] output io_y_u, // @[package.scala:268:18] output io_y_ae_ptw, // @[package.scala:268:18] output io_y_ae_final, // @[package.scala:268:18] output io_y_ae_stage2, // @[package.scala:268:18] output io_y_pf, // @[package.scala:268:18] output io_y_gf, // @[package.scala:268:18] output io_y_sw, // @[package.scala:268:18] output io_y_sx, // @[package.scala:268:18] output io_y_sr, // @[package.scala:268:18] output io_y_hw, // @[package.scala:268:18] output io_y_hx, // @[package.scala:268:18] output io_y_hr, // @[package.scala:268:18] output io_y_pw, // @[package.scala:268:18] output io_y_px, // @[package.scala:268:18] output io_y_pr, // @[package.scala:268:18] output io_y_ppp, // @[package.scala:268:18] output io_y_pal, // @[package.scala:268:18] output io_y_paa, // @[package.scala:268:18] output io_y_eff, // @[package.scala:268:18] output io_y_c // @[package.scala:268:18] ); wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30] wire io_x_u_0 = io_x_u; // @[package.scala:267:30] wire io_x_g_0 = io_x_g; // @[package.scala:267:30] wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30] wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30] wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30] wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30] wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30] wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30] wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30] wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30] wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30] wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30] wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30] wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30] wire io_x_px_0 = io_x_px; // @[package.scala:267:30] wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30] wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30] wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30] wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30] wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30] wire io_x_c_0 = io_x_c; // @[package.scala:267:30] wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30] wire [19:0] io_y_ppn_0 = io_x_ppn_0; // @[package.scala:267:30] wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30] wire io_y_g = io_x_g_0; // 
@[package.scala:267:30] wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // @[package.scala:267:30] wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30] wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30] wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30] wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30] wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30] wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30] wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30] wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30] wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30] wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30] wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30] wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30] wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30] wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30] wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30] wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30] wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30] wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30] wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30] assign io_y_ppn = io_y_ppn_0; // @[package.scala:267:30] assign io_y_u = io_y_u_0; // @[package.scala:267:30] assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30] assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30] assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30] assign io_y_pf = io_y_pf_0; // @[package.scala:267:30] assign io_y_gf = io_y_gf_0; // @[package.scala:267:30] assign io_y_sw = io_y_sw_0; // @[package.scala:267:30] assign io_y_sx = io_y_sx_0; // @[package.scala:267:30] assign io_y_sr = io_y_sr_0; // @[package.scala:267:30] assign io_y_hw = io_y_hw_0; // @[package.scala:267:30] assign io_y_hx = io_y_hx_0; // @[package.scala:267:30] assign io_y_hr = io_y_hr_0; // @[package.scala:267:30] assign io_y_pw = io_y_pw_0; // @[package.scala:267:30] assign io_y_px = io_y_px_0; // @[package.scala:267:30] assign io_y_pr = io_y_pr_0; // @[package.scala:267:30] assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30] assign io_y_pal = io_y_pal_0; // @[package.scala:267:30] assign io_y_paa = io_y_paa_0; // @[package.scala:267:30] assign io_y_eff = io_y_eff_0; // @[package.scala:267:30] assign io_y_c = io_y_c_0; // @[package.scala:267:30] endmodule
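The pass-through module above is what OptimizationBarrier from the package object elaborates to: one io_x_*/io_y_* pair per leaf field of the wrapped bundle, wired straight through inside a separate module, presumably so the wrapped value cannot be optimized across the module boundary. A minimal use on a plain UInt (an illustration, not one of the original files) would look like:

import chisel3._
import freechips.rocketchip.util.OptimizationBarrier

// Sketch only: wrap a value in an OptimizationBarrier pass-through module.
class BarrierUser extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(20.W))
    val out = Output(UInt(20.W))
  })
  // Elaborates a child module named from desiredName (e.g. OptimizationBarrier_UInt20)
  // whose io.y simply mirrors io.x.
  io.out := OptimizationBarrier(io.in)
}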
Generate the Verilog code corresponding to the following Chisel files. File AsyncResetReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ /** This black-boxes an Async Reset * (or Set) * Register. * * Because Chisel doesn't support * parameterized black boxes, * we unfortunately have to * instantiate a number of these. * * We also have to hard-code the set/ * reset behavior. * * Do not confuse an asynchronous * reset signal with an asynchronously * reset reg. You should still * properly synchronize your reset * deassertion. * * @param d Data input * @param q Data Output * @param clk Clock Input * @param rst Reset Input * @param en Write Enable Input * */ class AsyncResetReg(resetValue: Int = 0) extends RawModule { val io = IO(new Bundle { val d = Input(Bool()) val q = Output(Bool()) val en = Input(Bool()) val clk = Input(Clock()) val rst = Input(Reset()) }) val reg = withClockAndReset(io.clk, io.rst.asAsyncReset)(RegInit(resetValue.U(1.W))) when (io.en) { reg := io.d } io.q := reg } class SimpleRegIO(val w: Int) extends Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) } class AsyncResetRegVec(val w: Int, val init: BigInt) extends Module { override def desiredName = s"AsyncResetRegVec_w${w}_i${init}" val io = IO(new SimpleRegIO(w)) val reg = withReset(reset.asAsyncReset)(RegInit(init.U(w.W))) when (io.en) { reg := io.d } io.q := reg } object AsyncResetReg { // Create Single Registers def apply(d: Bool, clk: Clock, rst: Bool, init: Boolean, name: Option[String]): Bool = { val reg = Module(new AsyncResetReg(if (init) 1 else 0)) reg.io.d := d reg.io.clk := clk reg.io.rst := rst reg.io.en := true.B name.foreach(reg.suggestName(_)) reg.io.q } def apply(d: Bool, clk: Clock, rst: Bool): Bool = apply(d, clk, rst, false, None) def apply(d: Bool, clk: Clock, rst: Bool, name: String): Bool = apply(d, clk, rst, false, Some(name)) // Create Vectors of Registers def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: Option[String] = None): UInt = { val w = updateData.getWidth max resetData.bitLength val reg = Module(new AsyncResetRegVec(w, resetData)) name.foreach(reg.suggestName(_)) reg.io.d := updateData reg.io.en := enable reg.io.q } def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: String): UInt = apply(updateData, resetData, enable, Some(name)) def apply(updateData: UInt, resetData: BigInt): UInt = apply(updateData, resetData, enable = true.B) def apply(updateData: UInt, resetData: BigInt, name: String): UInt = apply(updateData, resetData, enable = true.B, Some(name)) def apply(updateData: UInt, enable: Bool): UInt = apply(updateData, resetData=BigInt(0), enable) def apply(updateData: UInt, enable: Bool, name: String): UInt = apply(updateData, resetData = BigInt(0), enable, Some(name)) def apply(updateData: UInt): UInt = apply(updateData, resetData = BigInt(0), enable = true.B) def apply(updateData: UInt, name:String): UInt = apply(updateData, resetData = BigInt(0), enable = true.B, Some(name)) }
module AsyncResetRegVec_w2_i0( // @[AsyncResetReg.scala:56:7] input clock, // @[AsyncResetReg.scala:56:7] input reset, // @[AsyncResetReg.scala:56:7] input [1:0] io_d, // @[AsyncResetReg.scala:59:14] output [1:0] io_q // @[AsyncResetReg.scala:59:14] ); reg [1:0] reg_0; // @[AsyncResetReg.scala:61:50] always @(posedge clock or posedge reset) begin // @[AsyncResetReg.scala:56:7] if (reset) // @[AsyncResetReg.scala:56:7] reg_0 <= 2'h0; // @[AsyncResetReg.scala:61:50] else // @[AsyncResetReg.scala:56:7] reg_0 <= io_d; // @[AsyncResetReg.scala:61:50] end // always @(posedge, posedge) assign io_q = reg_0; endmodule
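A brief usage sketch (not one of the original files): the 2-bit, init-0 register above is what the AsyncResetReg helper object from AsyncResetReg.scala produces when driven with a 2-bit value, a zero reset value, and a constant-true enable; the wrapper module name below is illustrative.

import chisel3._
import freechips.rocketchip.util.AsyncResetReg

// Sketch only: request a 2-bit asynchronously reset register via the helper object.
class TwoBitAsyncReg extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(2.W))
    val q = Output(UInt(2.W))
  })
  // Width = max(2, bitLength(0)) = 2 and init = 0, so desiredName yields
  // "AsyncResetRegVec_w2_i0"; the always-true enable lets the en port fold away.
  io.q := AsyncResetReg(io.d, resetData = BigInt(0), enable = true.B)
}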
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else {
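// Note on the variable-amount rotate below (illustrative): it is a log2(width)-stage barrel rotate in which stage i rotates by 2^i only when bit i of the padded amount is set, e.g. n = 3 on an 8-bit value applies the rotate-by-1 and rotate-by-2 stages and skips rotate-by-4.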
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s from high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalone diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // A potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max,
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask selects the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
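// e.g: base=0x0, mask=-1 (AddressSet.everything) matches every address, since set mask bits are "don't care" address bits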
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there exist C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there exist D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD =>
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
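The edge helpers above are normally consumed from a diplomatic client node. Below is a minimal, illustrative sketch (not part of the file above) of a master that issues a Get through TLEdgeOut and sinks the AccessAckData reply; the module name, the node configuration, the address 0x1000 and lgSize = 3 are assumptions for demonstration only.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._

class ExampleGetClient(implicit p: Parameters) extends LazyModule {
  // One outstanding source ID is enough for this sketch.
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(name = "example-get-client", sourceId = IdRange(0, 1))))))

  lazy val module = new LazyModuleImp(this) {
    val (tl, edge) = node.out(0)
    // Get returns (legal, bits): 'legal' reports whether some manager visible from
    // this edge supports a Get of this size at this address.
    val (legal, get) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 3.U)
    tl.a.valid := legal      // a real client would also gate this on its own request state
    tl.a.bits  := get
    tl.d.ready := true.B     // sink the AccessAckData response unconditionally
  }
}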
module TLMonitor_35( // @[Monitor.scala:36:7]
  input clock, // @[Monitor.scala:36:7]
  input reset, // @[Monitor.scala:36:7]
  input io_in_a_ready, // @[Monitor.scala:20:14]
  input io_in_a_valid, // @[Monitor.scala:20:14]
  input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
  input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
  input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
  input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
  input [27:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
  input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
  input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
  input io_in_d_ready, // @[Monitor.scala:20:14]
  input io_in_d_valid, // @[Monitor.scala:20:14]
  input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
  input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
  input [6:0] io_in_d_bits_source // @[Monitor.scala:20:14]
);

  wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
  wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
  wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71]
  wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
  reg [2:0] a_first_counter; // @[Edges.scala:229:27]
  reg [2:0] opcode; // @[Monitor.scala:387:22]
  reg [2:0] param; // @[Monitor.scala:388:22]
  reg [2:0] size; // @[Monitor.scala:389:22]
  reg [6:0] source; // @[Monitor.scala:390:22]
  reg [27:0] address; // @[Monitor.scala:391:22]
  reg [2:0] d_first_counter; // @[Edges.scala:229:27]
  reg [2:0] opcode_1; // @[Monitor.scala:538:22]
  reg [2:0] size_1; // @[Monitor.scala:540:22]
  reg [6:0] source_1; // @[Monitor.scala:541:22]
  reg [64:0] inflight; // @[Monitor.scala:614:27]
  reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
  reg [259:0] inflight_sizes; // @[Monitor.scala:618:33]
  reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
  wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
  reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
  wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
  wire [127:0] _GEN_0 = {121'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
  wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
  wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
  wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
  wire [127:0] _GEN_3 = {121'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
  reg [31:0] watchdog; // @[Monitor.scala:709:27]
  reg [64:0] inflight_1; // @[Monitor.scala:726:35]
  reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35]
  reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
  wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
  reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files.
File SourceC.scala:
/*
 * Copyright 2019 SiFive, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You should have received a copy of LICENSE.Apache2 along with
 * this software. If not, you may obtain a copy at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package sifive.blocks.inclusivecache

import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink._

class SourceCRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) {
  val opcode = UInt(3.W)
  val param = UInt(3.W)
  val source = UInt(params.outer.bundle.sourceBits.W)
  val tag = UInt(params.tagBits.W)
  val set = UInt(params.setBits.W)
  val way = UInt(params.wayBits.W)
  val dirty = Bool()
}

class SourceC(params: InclusiveCacheParameters) extends Module {
  val io = IO(new Bundle {
    val req = Flipped(Decoupled(new SourceCRequest(params)))
    val c = Decoupled(new TLBundleC(params.outer.bundle))
    // BankedStore port
    val bs_adr = Decoupled(new BankedStoreOuterAddress(params))
    val bs_dat = Flipped(new BankedStoreOuterDecoded(params))
    // RaW hazard
    val evict_req = new SourceDHazard(params)
    val evict_safe = Flipped(Bool())
  })

  // We ignore the depth and pipe is useless here (we have to provision for worst-case=stall)
  require (!params.micro.outerBuf.c.pipe)

  val beatBytes = params.outer.manager.beatBytes
  val beats = params.cache.blockBytes / beatBytes
  val flow = params.micro.outerBuf.c.flow
  val queue = Module(new Queue(chiselTypeOf(io.c.bits), beats + 3 + (if (flow) 0 else 1), flow = flow))

  // queue.io.count is far too slow
  val fillBits = log2Up(beats + 4)
  val fill = RegInit(0.U(fillBits.W))
  val room = RegInit(true.B)
  when (queue.io.enq.fire =/= queue.io.deq.fire) {
    fill := fill + Mux(queue.io.enq.fire, 1.U, ~0.U(fillBits.W))
    room := fill === 0.U || ((fill === 1.U || fill === 2.U) && !queue.io.enq.fire)
  }
  assert (room === queue.io.count <= 1.U)

  val busy = RegInit(false.B)
  val beat = RegInit(0.U(params.outerBeatBits.W))
  val last = if (params.cache.blockBytes == params.outer.manager.beatBytes) true.B else (beat === ~(0.U(params.outerBeatBits.W)))
  val req = Mux(!busy, io.req.bits, RegEnable(io.req.bits, !busy && io.req.valid))
  val want_data = busy || (io.req.valid && room && io.req.bits.dirty)

  io.req.ready := !busy && room

  io.evict_req.set := req.set
  io.evict_req.way := req.way

  io.bs_adr.valid := (beat.orR || io.evict_safe) && want_data
  io.bs_adr.bits.noop := false.B
  io.bs_adr.bits.way := req.way
  io.bs_adr.bits.set := req.set
  io.bs_adr.bits.beat := beat
  io.bs_adr.bits.mask := ~0.U(params.outerMaskBits.W)

  params.ccover(io.req.valid && io.req.bits.dirty && room && !io.evict_safe, "SOURCEC_HAZARD", "Prevented Eviction data hazard with backpressure")
  params.ccover(io.bs_adr.valid && !io.bs_adr.ready, "SOURCEC_SRAM_STALL", "Data SRAM busy")

  when (io.req.valid && room && io.req.bits.dirty) { busy := true.B }
  when (io.bs_adr.fire) {
    beat := beat + 1.U
    when (last) {
      busy := false.B
      beat := 0.U
    }
  }

  val s2_latch = Mux(want_data, io.bs_adr.fire, io.req.fire)
  val s2_valid = RegNext(s2_latch)
  val s2_req = RegEnable(req, s2_latch)
  val s2_beat = RegEnable(beat, s2_latch)
  val s2_last = RegEnable(last, s2_latch)

  val s3_latch = s2_valid
  val s3_valid = RegNext(s3_latch)
  val s3_req = RegEnable(s2_req, s3_latch)
  val s3_beat = RegEnable(s2_beat, s3_latch)
  val s3_last = RegEnable(s2_last, s3_latch)

  val c = Wire(chiselTypeOf(io.c))
  c.valid := s3_valid
  c.bits.opcode := s3_req.opcode
  c.bits.param := s3_req.param
  c.bits.size := params.offsetBits.U
  c.bits.source := s3_req.source
  c.bits.address := params.expandAddress(s3_req.tag, s3_req.set, 0.U)
  c.bits.data := io.bs_dat.data
  c.bits.corrupt := false.B

  // We never accept at the front-end unless we're sure things will fit
  assert(!c.valid || c.ready)
  params.ccover(!c.ready, "SOURCEC_QUEUE_FULL", "Eviction queue fully utilized")

  queue.io.enq <> c
  io.c <> queue.io.deq
}
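The registered fill/room tracking above exists because, as the comment notes, queue.io.count is far too slow to feed the ready calculation directly. A stripped-down sketch of the same idiom follows; the TrackedQueue name and signals are invented for illustration, and the room computation is simplified relative to SourceC (which registers room a cycle early).

import chisel3._
import chisel3.util._

class TrackedQueue[T <: Data](gen: T, entries: Int) extends Module {
  val io = IO(new Bundle {
    val enq  = Flipped(Decoupled(gen))
    val deq  = Decoupled(gen)
    val room = Output(Bool()) // high when at most one entry is held
  })
  val q = Module(new Queue(gen, entries))
  q.io.enq <> io.enq
  io.deq <> q.io.deq

  // Track occupancy in a register instead of reading q.io.count combinationally.
  val fill = RegInit(0.U(log2Ceil(entries + 1).W))
  when (q.io.enq.fire =/= q.io.deq.fire) {
    fill := Mux(q.io.enq.fire, fill + 1.U, fill - 1.U)
  }
  io.room := fill <= 1.U
}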
module SourceC( // @[SourceC.scala:35:7]
  input clock, // @[SourceC.scala:35:7]
  input reset, // @[SourceC.scala:35:7]
  output io_req_ready, // @[SourceC.scala:37:14]
  input io_req_valid, // @[SourceC.scala:37:14]
  input [2:0] io_req_bits_opcode, // @[SourceC.scala:37:14]
  input [2:0] io_req_bits_param, // @[SourceC.scala:37:14]
  input [2:0] io_req_bits_source, // @[SourceC.scala:37:14]
  input [10:0] io_req_bits_tag, // @[SourceC.scala:37:14]
  input [9:0] io_req_bits_set, // @[SourceC.scala:37:14]
  input [2:0] io_req_bits_way, // @[SourceC.scala:37:14]
  input io_req_bits_dirty, // @[SourceC.scala:37:14]
  input io_c_ready, // @[SourceC.scala:37:14]
  output io_c_valid, // @[SourceC.scala:37:14]
  output [2:0] io_c_bits_opcode, // @[SourceC.scala:37:14]
  output [2:0] io_c_bits_param, // @[SourceC.scala:37:14]
  output [2:0] io_c_bits_size, // @[SourceC.scala:37:14]
  output [2:0] io_c_bits_source, // @[SourceC.scala:37:14]
  output [31:0] io_c_bits_address, // @[SourceC.scala:37:14]
  output [63:0] io_c_bits_data, // @[SourceC.scala:37:14]
  output io_c_bits_corrupt, // @[SourceC.scala:37:14]
  input io_bs_adr_ready, // @[SourceC.scala:37:14]
  output io_bs_adr_valid, // @[SourceC.scala:37:14]
  output [2:0] io_bs_adr_bits_way, // @[SourceC.scala:37:14]
  output [9:0] io_bs_adr_bits_set, // @[SourceC.scala:37:14]
  output [2:0] io_bs_adr_bits_beat, // @[SourceC.scala:37:14]
  input [63:0] io_bs_dat_data, // @[SourceC.scala:37:14]
  output [9:0] io_evict_req_set, // @[SourceC.scala:37:14]
  output [2:0] io_evict_req_way, // @[SourceC.scala:37:14]
  input io_evict_safe // @[SourceC.scala:37:14]
);

  wire _queue_io_enq_ready; // @[SourceC.scala:54:21]
  wire _queue_io_deq_valid; // @[SourceC.scala:54:21]
  wire [3:0] _queue_io_count; // @[SourceC.scala:54:21]
  reg [3:0] fill; // @[SourceC.scala:58:21]
  reg room; // @[SourceC.scala:59:21]
  reg busy; // @[SourceC.scala:66:21]
  reg [2:0] beat; // @[SourceC.scala:67:21]
  reg [2:0] req_r_opcode; // @[SourceC.scala:69:47]
  reg [2:0] req_r_param; // @[SourceC.scala:69:47]
  reg [2:0] req_r_source; // @[SourceC.scala:69:47]
  reg [10:0] req_r_tag; // @[SourceC.scala:69:47]
  reg [9:0] req_r_set; // @[SourceC.scala:69:47]
  reg [2:0] req_r_way; // @[SourceC.scala:69:47]
  wire [9:0] req_set = busy ? req_r_set : io_req_bits_set; // @[SourceC.scala:66:21, :69:{17,47}]
  wire [2:0] req_way = busy ? req_r_way : io_req_bits_way; // @[SourceC.scala:66:21, :69:{17,47}]
  wire _want_data_T = io_req_valid & room; // @[SourceC.scala:59:21, :70:41]
  wire want_data = busy | _want_data_T & io_req_bits_dirty; // @[SourceC.scala:66:21, :70:{24,41,49}]
  wire io_req_ready_0 = ~busy & room; // @[SourceC.scala:59:21, :66:21, :69:18, :72:25]
  wire io_bs_adr_valid_0 = ((|beat) | io_evict_safe) & want_data; // @[SourceC.scala:67:21, :70:24, :77:{28,32,50}]
  reg s2_valid; // @[SourceC.scala:97:25]
  reg [2:0] s2_req_opcode; // @[SourceC.scala:98:25]
  reg [2:0] s2_req_param; // @[SourceC.scala:98:25]
  reg [2:0] s2_req_source; // @[SourceC.scala:98:25]
  reg [10:0] s2_req_tag; // @[SourceC.scala:98:25]
  reg [9:0] s2_req_set; // @[SourceC.scala:98:25]
  reg s3_valid; // @[SourceC.scala:103:25]
  reg [2:0] s3_req_opcode; // @[SourceC.scala:104:25]
  reg [2:0] s3_req_param; // @[SourceC.scala:104:25]
  reg [2:0] s3_req_source; // @[SourceC.scala:104:25]
  reg [10:0] s3_req_tag; // @[SourceC.scala:104:25]
  reg [9:0] s3_req_set; // @[SourceC.scala:104:25]
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
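As the header of Arithmetic.scala suggests, new element types are supported by supplying another implicit Arithmetic instance. The sketch below is an illustration, not part of gemmini: the Fixed8 wrapper type, the enclosing Fixed8Arithmetic object, and the plain truncating/shift semantics are all assumptions, and the code presumes it is compiled inside the gemmini package so that Arithmetic and ArithmeticOps are in scope.

import chisel3._
import chisel3.util._

// Hypothetical element type: a thin wrapper around an 8-bit signed value.
case class Fixed8() extends Bundle { val bits = SInt(8.W) }

object Fixed8Arithmetic {
  implicit object Fixed8IsArithmetic extends Arithmetic[Fixed8] {
    override implicit def cast(self: Fixed8) = new ArithmeticOps(self) {
      // Sign-extend or truncate back to 8 bits after each operation.
      private def wrap(s: SInt): Fixed8 = { val o = Wire(new Fixed8); o.bits := s.pad(8)(7, 0).asSInt; o }
      override def *(t: Fixed8) = wrap(self.bits * t.bits)
      override def mac(m1: Fixed8, m2: Fixed8) = wrap(m1.bits * m2.bits + self.bits)
      override def +(t: Fixed8) = wrap(self.bits + t.bits)
      override def -(t: Fixed8) = wrap(self.bits - t.bits)
      override def >>(u: UInt) = wrap(self.bits >> u) // NOTE: plain shift, no rounding
      override def >(t: Fixed8): Bool = self.bits > t.bits
      override def identity = wrap(1.S)
      override def withWidthOf(t: Fixed8) = self
      override def clippedToWidthOf(t: Fixed8) = self
      override def relu = wrap(Mux(self.bits >= 0.S, self.bits, 0.S))
      override def zero = wrap(0.S)
      override def minimum = wrap((-128).S)
    }
  }
}

With `import Fixed8Arithmetic._` in scope, `new PE(Fixed8(), Fixed8(), Fixed8(), ...)` would then elaborate against this instance the same way the SInt and Float instances are used.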
module PE_319( // @[PE.scala:31:7] input clock, // @[PE.scala:31:7] input reset, // @[PE.scala:31:7] input [7:0] io_in_a, // @[PE.scala:35:14] input [19:0] io_in_b, // @[PE.scala:35:14] input [19:0] io_in_d, // @[PE.scala:35:14] output [7:0] io_out_a, // @[PE.scala:35:14] output [19:0] io_out_b, // @[PE.scala:35:14] output [19:0] io_out_c, // @[PE.scala:35:14] input io_in_control_dataflow, // @[PE.scala:35:14] input io_in_control_propagate, // @[PE.scala:35:14] input [4:0] io_in_control_shift, // @[PE.scala:35:14] output io_out_control_dataflow, // @[PE.scala:35:14] output io_out_control_propagate, // @[PE.scala:35:14] output [4:0] io_out_control_shift, // @[PE.scala:35:14] input [2:0] io_in_id, // @[PE.scala:35:14] output [2:0] io_out_id, // @[PE.scala:35:14] input io_in_last, // @[PE.scala:35:14] output io_out_last, // @[PE.scala:35:14] input io_in_valid, // @[PE.scala:35:14] output io_out_valid // @[PE.scala:35:14] ); wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7] wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7] wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7] wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7] wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7] wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7] wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7] wire io_in_last_0 = io_in_last; // @[PE.scala:31:7] wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7] wire io_bad_dataflow = 1'h0; // @[PE.scala:31:7] wire _io_out_c_T_5 = 1'h0; // @[Arithmetic.scala:125:33] wire _io_out_c_T_6 = 1'h0; // @[Arithmetic.scala:125:60] wire _io_out_c_T_16 = 1'h0; // @[Arithmetic.scala:125:33] wire _io_out_c_T_17 = 1'h0; // @[Arithmetic.scala:125:60] wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7] wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37] wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37] wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35] wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7] wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7] wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7] wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7] wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7] wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7] wire [19:0] io_out_b_0; // @[PE.scala:31:7] wire [19:0] io_out_c_0; // @[PE.scala:31:7] reg [7:0] c1; // @[PE.scala:70:15] wire [7:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15] wire [7:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38] reg [7:0] c2; // @[PE.scala:71:15] wire [7:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15] wire [7:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38] reg last_s; // @[PE.scala:89:25] wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21] wire [4:0] shift_offset = flip ? 
io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25] wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25] wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32] wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32] wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25] wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53] wire [7:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15] wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}] wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25] wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27] wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27] wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_7 = {24'h0, _io_out_c_zeros_T_6[7:0] & _io_out_c_zeros_T_1}; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 
32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}] wire [7:0] _GEN_2 = {3'h0, shift_offset}; // @[PE.scala:91:25] wire [7:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15] wire [7:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30] wire [7:0] _io_out_c_T; // @[Arithmetic.scala:107:15] assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33] wire [8:0] _io_out_c_T_2 = {_io_out_c_T[7], _io_out_c_T} + {{7{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}] wire [7:0] _io_out_c_T_3 = _io_out_c_T_2[7:0]; // @[Arithmetic.scala:107:28] wire [7:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28] wire [19:0] _io_out_c_T_7 = {{12{_io_out_c_T_4[7]}}, _io_out_c_T_4}; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_8 = _io_out_c_T_7; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_9 = _io_out_c_T_8; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37] wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37] wire [7:0] _c1_T = io_in_d_0[7:0]; // @[PE.scala:31:7] wire [7:0] _c2_T = io_in_d_0[7:0]; // @[PE.scala:31:7] wire [7:0] _c1_T_1 = _c1_T; // @[Arithmetic.scala:114:{15,33}] wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53] wire [7:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15] wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}] wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_16 = {24'h0, _io_out_c_zeros_T_15[7:0] & _io_out_c_zeros_T_10}; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 
32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}] wire [7:0] _GEN_4 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15] wire [7:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T_1 = _GEN_4; // @[Arithmetic.scala:103:30] wire [7:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15] assign _io_out_c_T_11 = _GEN_4; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33] wire [8:0] _io_out_c_T_13 = {_io_out_c_T_11[7], _io_out_c_T_11} + {{7{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}] wire [7:0] _io_out_c_T_14 = _io_out_c_T_13[7:0]; // @[Arithmetic.scala:107:28] wire [7:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28] wire [19:0] _io_out_c_T_18 = {{12{_io_out_c_T_15[7]}}, _io_out_c_T_15}; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_19 = _io_out_c_T_18; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_20 = _io_out_c_T_19; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37] wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37] wire [7:0] _c2_T_1 = _c2_T; // @[Arithmetic.scala:114:{15,33}] wire [7:0] _mac_unit_io_in_b_T_5; // @[PE.scala:121:38] assign _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38] wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5; // @[PE.scala:121:38] assign io_out_c_0 = io_in_control_propagate_0 ? {{12{c1[7]}}, c1} : {{12{c2[7]}}, c2}; // @[PE.scala:31:7, :70:15, :71:15, :119:30, :120:16, :126:16] wire [7:0] _mac_unit_io_in_b_T_7; // @[PE.scala:127:38] assign _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38] wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7; // @[PE.scala:127:38] wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35] wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35] always @(posedge clock) begin // @[PE.scala:31:7] if (io_in_valid_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :102:95, :141:17, :142:8] c1 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :70:15] if (~(~io_in_valid_0 | io_in_control_propagate_0)) // @[PE.scala:31:7, :71:15, :102:95, :119:30, :130:10, :141:{9,17}, :143:8] c2 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :71:15] if (io_in_valid_0) // @[PE.scala:31:7] last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25] always @(posedge) MacUnit_63 mac_unit ( // @[PE.scala:64:24] .clock (clock), .reset (reset), .io_in_a (io_in_a_0), // @[PE.scala:31:7] .io_in_b (io_in_control_propagate_0 ? 
_mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3), // @[PE.scala:31:7, :119:30, :121:{24,38}, :127:{24,38}] .io_in_c (io_in_b_0), // @[PE.scala:31:7] .io_out_d (io_out_b_0) ); // @[PE.scala:64:24] assign io_out_a = io_out_a_0; // @[PE.scala:31:7] assign io_out_b = io_out_b_0; // @[PE.scala:31:7] assign io_out_c = io_out_c_0; // @[PE.scala:31:7] assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7] assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7] assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7] assign io_out_id = io_out_id_0; // @[PE.scala:31:7] assign io_out_last = io_out_last_0; // @[PE.scala:31:7] assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7] endmodule
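The port widths of PE_319 (8-bit a, 20-bit b/d, 5-bit shift, 3-bit id) are consistent with a generator invocation along the lines of the sketch below. This is an illustrative guess at the parameterization, not the configuration actually used to produce the module above; the EmitPE object name is invented and the ChiselStage helper shown is the Chisel 3.5-era API.

import chisel3._
import chisel3.stage.ChiselStage
import gemmini._

object EmitPE extends App {
  // inputType = SInt(8.W), outputType = SInt(20.W), accType = SInt(32.W);
  // log2Up(32) = 5 matches the shift port, log2Up(8) = 3 matches the id port,
  // and the weight-stationary dataflow makes cType = inputType (8-bit c1/c2 registers).
  ChiselStage.emitVerilog(
    new PE(SInt(8.W), SInt(20.W), SInt(32.W), Dataflow.WS, max_simultaneous_matmuls = 8)
  )
}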
Generate the Verilog code corresponding to the following Chisel files. File INToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import consts._ class INToRecFN(intWidth: Int, expWidth: Int, sigWidth: Int) extends RawModule { override def desiredName = s"INToRecFN_i${intWidth}_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val signedIn = Input(Bool()) val in = Input(Bits(intWidth.W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val intAsRawFloat = rawFloatFromIN(io.signedIn, io.in); val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( intAsRawFloat.expWidth, intWidth, expWidth, sigWidth, flRoundOpt_sigMSBitAlwaysZero | flRoundOpt_neverUnderflows )) roundAnyRawFNToRecFN.io.invalidExc := false.B roundAnyRawFNToRecFN.io.infiniteExc := false.B roundAnyRawFNToRecFN.io.in := intAsRawFloat roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags } File rawFloatFromIN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). 
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ object rawFloatFromIN { def apply(signedIn: Bool, in: Bits): RawFloat = { val expWidth = log2Up(in.getWidth) + 1 //*** CHANGE THIS; CAN BE VERY LARGE: val extIntWidth = 1<<(expWidth - 1) val sign = signedIn && in(in.getWidth - 1) val absIn = Mux(sign, -in.asUInt, in.asUInt) val extAbsIn = (0.U(extIntWidth.W) ## absIn)(extIntWidth - 1, 0) val adjustedNormDist = countLeadingZeros(extAbsIn) val sig = (extAbsIn<<adjustedNormDist)( extIntWidth - 1, extIntWidth - in.getWidth) val out = Wire(new RawFloat(expWidth, in.getWidth)) out.isNaN := false.B out.isInf := false.B out.isZero := ! sig(in.getWidth - 1) out.sign := sign out.sExp := (2.U(2.W) ## ~adjustedNormDist(expWidth - 2, 0)).zext out.sig := sig out } }
module INToRecFN_i16_e8_s24( // @[INToRecFN.scala:43:7] input io_signedIn, // @[INToRecFN.scala:46:16] input [15:0] io_in, // @[INToRecFN.scala:46:16] output [32:0] io_out // @[INToRecFN.scala:46:16] ); wire intAsRawFloat_sign = io_signedIn & io_in[15]; // @[rawFloatFromIN.scala:51:{29,34}] wire [15:0] intAsRawFloat_absIn = intAsRawFloat_sign ? 16'h0 - io_in : io_in; // @[rawFloatFromIN.scala:51:29, :52:{24,31}] wire [3:0] intAsRawFloat_adjustedNormDist = intAsRawFloat_absIn[15] ? 4'h0 : intAsRawFloat_absIn[14] ? 4'h1 : intAsRawFloat_absIn[13] ? 4'h2 : intAsRawFloat_absIn[12] ? 4'h3 : intAsRawFloat_absIn[11] ? 4'h4 : intAsRawFloat_absIn[10] ? 4'h5 : intAsRawFloat_absIn[9] ? 4'h6 : intAsRawFloat_absIn[8] ? 4'h7 : intAsRawFloat_absIn[7] ? 4'h8 : intAsRawFloat_absIn[6] ? 4'h9 : intAsRawFloat_absIn[5] ? 4'hA : intAsRawFloat_absIn[4] ? 4'hB : intAsRawFloat_absIn[3] ? 4'hC : intAsRawFloat_absIn[2] ? 4'hD : {3'h7, ~(intAsRawFloat_absIn[1])}; // @[Mux.scala:50:70] wire [30:0] _intAsRawFloat_sig_T = {15'h0, intAsRawFloat_absIn} << intAsRawFloat_adjustedNormDist; // @[Mux.scala:50:70] RoundAnyRawFNToRecFN_ie5_is16_oe8_os24 roundAnyRawFNToRecFN ( // @[INToRecFN.scala:60:15] .io_in_isZero (~(_intAsRawFloat_sig_T[15])), // @[rawFloatFromIN.scala:56:{22,41}, :62:{23,28}] .io_in_sign (intAsRawFloat_sign), // @[rawFloatFromIN.scala:51:29] .io_in_sExp ({3'h2, ~intAsRawFloat_adjustedNormDist}), // @[Mux.scala:50:70] .io_in_sig ({1'h0, _intAsRawFloat_sig_T[15:0]}), // @[rawFloatFromIN.scala:52:31, :56:{22,41}, :65:20] .io_out (io_out) ); // @[INToRecFN.scala:60:15] endmodule
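A minimal plain-Scala reference for the normalization that rawFloatFromIN performs on this 16-bit input may help connect the Chisel above to the priority mux emitted as intAsRawFloat_adjustedNormDist; this is a sketch, and the object and function names below (RawFromInt16Ref, refRawFromInt16) are illustrative, not part of the HardFloat sources.

object RawFromInt16Ref {
  // Returns (sign, adjustedNormDist, sig, isZero) for a 16-bit integer input,
  // mirroring rawFloatFromIN for intWidth = 16.
  def refRawFromInt16(signedIn: Boolean, in: Int): (Boolean, Int, Int, Boolean) = {
    val x     = in & 0xFFFF
    val sign  = signedIn && ((x >> 15) & 1) == 1
    val absIn = if (sign) (-x) & 0xFFFF else x
    // Leading-zero count over 16 bits, capped at 15 for absIn == 0, matching the
    // priority mux that drives intAsRawFloat_adjustedNormDist in the Verilog above.
    val nlz = (15 to 0 by -1).indexWhere(i => ((absIn >> i) & 1) == 1) match {
      case -1 => 15
      case d  => d
    }
    val sig    = (absIn << nlz) & 0xFFFF      // normalized significand bits
    val isZero = ((sig >> 15) & 1) == 0       // MSB clear only when absIn == 0
    (sign, nlz, sig, isZero)
  }

  def main(args: Array[String]): Unit = {
    // 0xFFF4 read as signed is -12: sign = true, normalization distance = 12.
    println(refRawFromInt16(signedIn = true, in = 0xFFF4)) // (true,12,49152,false)
  }
}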
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerShiftReg_w1_d3_i0_173( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_313 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
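As a usage illustration, the wrapper above is what the AsyncResetSynchronizerShiftReg factory elaborates for a 1-bit, 3-deep, init-0 crossing. The following is a minimal sketch, assuming an illustrative module and signal names (LevelSyncExample, asyncLevel); only the factory call itself comes from the sources above.

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class LevelSyncExample extends Module {
  val io = IO(new Bundle {
    val asyncLevel  = Input(Bool())   // level driven from another clock domain
    val syncedLevel = Output(Bool())
  })
  // Elaborates to an AsyncResetSynchronizerShiftReg_w1_d3_i0 wrapper containing one
  // AsyncResetSynchronizerPrimitiveShiftReg_d3_i0 chain per bit, as in the module above.
  io.syncedLevel := AsyncResetSynchronizerShiftReg(io.asyncLevel, sync = 3, init = 0, name = Some("level_sync"))
}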
Generate the Verilog code corresponding to the following Chisel files. File RoundAnyRawFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util.Fill import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundAnyRawFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int, options: Int ) extends RawModule { override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(inExpWidth, inSigWidth)) // (allowed exponent range has limits) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0) val effectiveInSigWidth = if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1 val neverUnderflows = ((options & (flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact) ) != 0) || (inExpWidth < outExpWidth) val neverOverflows = ((options & flRoundOpt_neverOverflows) != 0) || (inExpWidth < outExpWidth) val outNaNExp = BigInt(7)<<(outExpWidth - 2) val outInfExp = BigInt(6)<<(outExpWidth - 2) val outMaxFiniteExp = outInfExp - 1 val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2 val outMinNonzeroExp = outMinNormExp - outSigWidth + 1 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundingMode_near_even = (io.roundingMode === round_near_even) val roundingMode_minMag = (io.roundingMode === round_minMag) val roundingMode_min = (io.roundingMode === round_min) val roundingMode_max = (io.roundingMode === round_max) val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag) val roundingMode_odd = (io.roundingMode === round_odd) val roundMagUp = (roundingMode_min && io.in.sign) || (roundingMode_max && ! 
io.in.sign) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sAdjustedExp = if (inExpWidth < outExpWidth) (io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S )(outExpWidth, 0).zext else if (inExpWidth == outExpWidth) io.in.sExp else io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S val adjustedSig = if (inSigWidth <= outSigWidth + 2) io.in.sig<<(outSigWidth - inSigWidth + 2) else (io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ## io.in.sig(inSigWidth - outSigWidth - 2, 0).orR ) val doShiftSigDown1 = if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2) val common_expOut = Wire(UInt((outExpWidth + 1).W)) val common_fractOut = Wire(UInt((outSigWidth - 1).W)) val common_overflow = Wire(Bool()) val common_totalUnderflow = Wire(Bool()) val common_underflow = Wire(Bool()) val common_inexact = Wire(Bool()) if ( neverOverflows && neverUnderflows && (effectiveInSigWidth <= outSigWidth) ) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1 common_fractOut := Mux(doShiftSigDown1, adjustedSig(outSigWidth + 1, 3), adjustedSig(outSigWidth, 2) ) common_overflow := false.B common_totalUnderflow := false.B common_underflow := false.B common_inexact := false.B } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundMask = if (neverUnderflows) 0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W) else (lowMask( sAdjustedExp(outExpWidth, 0), outMinNormExp - outSigWidth - 1, outMinNormExp ) | doShiftSigDown1) ## 3.U(2.W) val shiftedRoundMask = 0.U(1.W) ## roundMask>>1 val roundPosMask = ~shiftedRoundMask & roundMask val roundPosBit = (adjustedSig & roundPosMask).orR val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR val anyRound = roundPosBit || anyRoundExtra val roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && roundPosBit) || (roundMagUp && anyRound) val roundedSig: Bits = Mux(roundIncr, (((adjustedSig | roundMask)>>2) +& 1.U) & ~Mux(roundingMode_near_even && roundPosBit && ! anyRoundExtra, roundMask>>1, 0.U((outSigWidth + 2).W) ), (adjustedSig & ~roundMask)>>2 | Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U) ) //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? 
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext common_expOut := sRoundedExp(outExpWidth, 0) common_fractOut := Mux(doShiftSigDown1, roundedSig(outSigWidth - 1, 1), roundedSig(outSigWidth - 2, 0) ) common_overflow := (if (neverOverflows) false.B else //*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?: (sRoundedExp>>(outExpWidth - 1) >= 3.S)) common_totalUnderflow := (if (neverUnderflows) false.B else //*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?: (sRoundedExp < outMinNonzeroExp.S)) val unboundedRange_roundPosBit = Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1)) val unboundedRange_anyRound = (doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR val unboundedRange_roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && unboundedRange_roundPosBit) || (roundMagUp && unboundedRange_anyRound) val roundCarry = Mux(doShiftSigDown1, roundedSig(outSigWidth + 1), roundedSig(outSigWidth) ) common_underflow := (if (neverUnderflows) false.B else common_totalUnderflow || //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? (anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) && Mux(doShiftSigDown1, roundMask(3), roundMask(2)) && ! ((io.detectTininess === tininess_afterRounding) && ! Mux(doShiftSigDown1, roundMask(4), roundMask(3) ) && roundCarry && roundPosBit && unboundedRange_roundIncr))) common_inexact := common_totalUnderflow || anyRound } //------------------------------------------------------------------------ //------------------------------------------------------------------------ val isNaNOut = io.invalidExc || io.in.isNaN val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero val overflow = commonCase && common_overflow val underflow = commonCase && common_underflow val inexact = overflow || (commonCase && common_inexact) val overflow_roundMagUp = roundingMode_near_even || roundingMode_near_maxMag || roundMagUp val pegMinNonzeroMagOut = commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd) val pegMaxFiniteMagOut = overflow && ! 
overflow_roundMagUp val notNaN_isInfOut = notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp) val signOut = Mux(isNaNOut, false.B, io.in.sign) val expOut = (common_expOut & ~Mux(io.in.isZero || common_totalUnderflow, (BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMinNonzeroMagOut, ~outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMaxFiniteMagOut, (BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W), 0.U ) & ~Mux(notNaN_isInfOut, (BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U )) | Mux(pegMinNonzeroMagOut, outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) | Mux(pegMaxFiniteMagOut, outMaxFiniteExp.U((outExpWidth + 1).W), 0.U ) | Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) | Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U) val fractOut = Mux(isNaNOut || io.in.isZero || common_totalUnderflow, Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U), common_fractOut ) | Fill(outSigWidth - 1, pegMaxFiniteMagOut) io.out := signOut ## expOut ## fractOut io.exceptionFlags := io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int) extends RawModule { override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(expWidth, sigWidth + 2)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( expWidth, sigWidth + 2, expWidth, sigWidth, options)) roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc roundAnyRawFNToRecFN.io.in := io.in roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags }
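Before the generated Verilog, a small software model of just the round-increment decision (roundIncr in the Chisel above) can make the specialization easier to follow. This is a sketch: RoundIncrRef and roundIncrement are illustrative names, the mode encodings mirror hardfloat's consts, and the tie-to-even LSB masking applied to roundedSig is deliberately left out.

object RoundIncrRef {
  val round_near_even   = 0
  val round_minMag      = 1
  val round_min         = 2
  val round_max         = 3
  val round_near_maxMag = 4
  val round_odd         = 6

  // sign: sign of the value being rounded; roundPosBit: the bit just below the kept
  // significand; anyRound: roundPosBit or any lower ("sticky") bit is set.
  def roundIncrement(mode: Int, sign: Boolean, roundPosBit: Boolean, anyRound: Boolean): Boolean = {
    val roundMagUp = (mode == round_min && sign) || (mode == round_max && !sign)
    ((mode == round_near_even || mode == round_near_maxMag) && roundPosBit) ||
      (roundMagUp && anyRound)
  }

  def main(args: Array[String]): Unit = {
    // Round-to-nearest-even increments only when the round bit is set.
    println(roundIncrement(round_near_even, sign = false, roundPosBit = true, anyRound = true))  // true
    // Round-toward-zero (minMag) never increments.
    println(roundIncrement(round_minMag, sign = false, roundPosBit = true, anyRound = true))     // false
  }
}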
module RoundAnyRawFNToRecFN_ie7_is64_oe5_os11_4( // @[RoundAnyRawFNToRecFN.scala:48:5] input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16] input [8:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16] input [64:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16] input [2:0] io_roundingMode, // @[RoundAnyRawFNToRecFN.scala:58:16] output [16:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16] output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16] ); wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [8:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [64:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [2:0] io_roundingMode_0 = io_roundingMode; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [11:0] _roundMask_T = 12'h0; // @[RoundAnyRawFNToRecFN.scala:153:36] wire [5:0] _expOut_T_4 = 6'h37; // @[RoundAnyRawFNToRecFN.scala:258:19] wire [13:0] roundMask = 14'h3; // @[RoundAnyRawFNToRecFN.scala:153:55] wire [14:0] _shiftedRoundMask_T = 15'h3; // @[RoundAnyRawFNToRecFN.scala:162:41] wire [13:0] shiftedRoundMask = 14'h1; // @[RoundAnyRawFNToRecFN.scala:162:53] wire [13:0] _roundPosMask_T = 14'h3FFE; // @[RoundAnyRawFNToRecFN.scala:163:28] wire [13:0] roundPosMask = 14'h2; // @[RoundAnyRawFNToRecFN.scala:163:46] wire [13:0] _roundedSig_T_10 = 14'h3FFC; // @[RoundAnyRawFNToRecFN.scala:180:32] wire [12:0] _roundedSig_T_6 = 13'h1; // @[RoundAnyRawFNToRecFN.scala:177:35, :181:67] wire [12:0] _roundedSig_T_14 = 13'h1; // @[RoundAnyRawFNToRecFN.scala:177:35, :181:67] wire [5:0] _expOut_T_6 = 6'h3F; // @[RoundAnyRawFNToRecFN.scala:257:14] wire [5:0] _expOut_T_5 = 6'h0; // @[RoundAnyRawFNToRecFN.scala:257:18] wire [5:0] _expOut_T_14 = 6'h0; // @[RoundAnyRawFNToRecFN.scala:269:16] wire [5:0] _expOut_T_20 = 6'h0; // @[RoundAnyRawFNToRecFN.scala:278:16] wire [9:0] _fractOut_T_2 = 10'h0; // @[RoundAnyRawFNToRecFN.scala:281:16] wire [1:0] _io_exceptionFlags_T = 2'h0; // @[RoundAnyRawFNToRecFN.scala:288:23] wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5] wire _commonCase_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:237:22] wire _commonCase_T_1 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:237:36] wire _commonCase_T_2 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:237:33] wire io_invalidExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isNaN = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isInf = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire common_totalUnderflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:125:37] wire common_underflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:126:37] wire _unboundedRange_anyRound_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:205:30] wire isNaNOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:235:34] wire notNaN_isSpecialInfOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:236:49] wire underflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:239:32] wire _pegMinNonzeroMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:20] wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45] wire _expOut_T = io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :253:32] wire _fractOut_T = io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :280:22] wire signOut = io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :250:22] wire [16:0] _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:286:33] wire 
[4:0] _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:288:66] wire [16:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire roundingMode_near_even = io_roundingMode_0 == 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :90:53] wire roundingMode_minMag = io_roundingMode_0 == 3'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :91:53] wire roundingMode_min = io_roundingMode_0 == 3'h2; // @[RoundAnyRawFNToRecFN.scala:48:5, :92:53] wire roundingMode_max = io_roundingMode_0 == 3'h3; // @[RoundAnyRawFNToRecFN.scala:48:5, :93:53] wire roundingMode_near_maxMag = io_roundingMode_0 == 3'h4; // @[RoundAnyRawFNToRecFN.scala:48:5, :94:53] wire roundingMode_odd = io_roundingMode_0 == 3'h6; // @[RoundAnyRawFNToRecFN.scala:48:5, :95:53] wire _roundMagUp_T = roundingMode_min & io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :92:53, :98:27] wire _roundMagUp_T_1 = ~io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :98:66] wire _roundMagUp_T_2 = roundingMode_max & _roundMagUp_T_1; // @[RoundAnyRawFNToRecFN.scala:93:53, :98:{63,66}] wire roundMagUp = _roundMagUp_T | _roundMagUp_T_2; // @[RoundAnyRawFNToRecFN.scala:98:{27,42,63}] wire [9:0] sAdjustedExp = {io_in_sExp_0[8], io_in_sExp_0} - 10'h60; // @[RoundAnyRawFNToRecFN.scala:48:5, :110:24] wire [12:0] _adjustedSig_T = io_in_sig_0[64:52]; // @[RoundAnyRawFNToRecFN.scala:48:5, :116:23] wire [51:0] _adjustedSig_T_1 = io_in_sig_0[51:0]; // @[RoundAnyRawFNToRecFN.scala:48:5, :117:26] wire _adjustedSig_T_2 = |_adjustedSig_T_1; // @[RoundAnyRawFNToRecFN.scala:117:{26,60}] wire [13:0] adjustedSig = {_adjustedSig_T, _adjustedSig_T_2}; // @[RoundAnyRawFNToRecFN.scala:116:{23,66}, :117:60] wire [5:0] _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:187:37] wire [5:0] common_expOut; // @[RoundAnyRawFNToRecFN.scala:122:31] wire [9:0] _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:189:16] wire [9:0] common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31] wire _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:196:50] wire common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37] wire _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:230:49] wire common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37] wire [13:0] _roundPosBit_T = adjustedSig & 14'h2; // @[RoundAnyRawFNToRecFN.scala:116:66, :163:46, :164:40] wire roundPosBit = |_roundPosBit_T; // @[RoundAnyRawFNToRecFN.scala:164:{40,56}] wire [13:0] _anyRoundExtra_T = adjustedSig & 14'h1; // @[RoundAnyRawFNToRecFN.scala:116:66, :162:53, :165:42] wire anyRoundExtra = |_anyRoundExtra_T; // @[RoundAnyRawFNToRecFN.scala:165:{42,62}] wire anyRound = roundPosBit | anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:164:56, :165:62, :166:36] assign _common_inexact_T = anyRound; // @[RoundAnyRawFNToRecFN.scala:166:36, :230:49] wire _GEN = roundingMode_near_even | roundingMode_near_maxMag; // @[RoundAnyRawFNToRecFN.scala:90:53, :94:53, :169:38] wire _roundIncr_T; // @[RoundAnyRawFNToRecFN.scala:169:38] assign _roundIncr_T = _GEN; // @[RoundAnyRawFNToRecFN.scala:169:38] wire _unboundedRange_roundIncr_T; // @[RoundAnyRawFNToRecFN.scala:207:38] assign _unboundedRange_roundIncr_T = _GEN; // @[RoundAnyRawFNToRecFN.scala:169:38, :207:38] wire _overflow_roundMagUp_T; // @[RoundAnyRawFNToRecFN.scala:243:32] assign _overflow_roundMagUp_T = _GEN; // @[RoundAnyRawFNToRecFN.scala:169:38, :243:32] wire _roundIncr_T_1 = _roundIncr_T & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :169:{38,67}] wire _roundIncr_T_2 = roundMagUp & anyRound; // 
@[RoundAnyRawFNToRecFN.scala:98:42, :166:36, :171:29] wire roundIncr = _roundIncr_T_1 | _roundIncr_T_2; // @[RoundAnyRawFNToRecFN.scala:169:67, :170:31, :171:29] wire [13:0] _roundedSig_T = adjustedSig | 14'h3; // @[RoundAnyRawFNToRecFN.scala:116:66, :153:55, :174:32] wire [11:0] _roundedSig_T_1 = _roundedSig_T[13:2]; // @[RoundAnyRawFNToRecFN.scala:174:{32,44}] wire [12:0] _roundedSig_T_2 = {1'h0, _roundedSig_T_1} + 13'h1; // @[RoundAnyRawFNToRecFN.scala:174:{44,49}, :177:35, :181:67] wire _roundedSig_T_3 = roundingMode_near_even & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:90:53, :164:56, :175:49] wire _roundedSig_T_4 = ~anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:165:62, :176:30] wire _roundedSig_T_5 = _roundedSig_T_3 & _roundedSig_T_4; // @[RoundAnyRawFNToRecFN.scala:175:{49,64}, :176:30] wire [12:0] _roundedSig_T_7 = {12'h0, _roundedSig_T_5}; // @[RoundAnyRawFNToRecFN.scala:175:{25,64}] wire [12:0] _roundedSig_T_8 = ~_roundedSig_T_7; // @[RoundAnyRawFNToRecFN.scala:175:{21,25}] wire [12:0] _roundedSig_T_9 = _roundedSig_T_2 & _roundedSig_T_8; // @[RoundAnyRawFNToRecFN.scala:174:{49,57}, :175:21] wire [13:0] _roundedSig_T_11 = adjustedSig & 14'h3FFC; // @[RoundAnyRawFNToRecFN.scala:116:66, :180:{30,32}] wire [11:0] _roundedSig_T_12 = _roundedSig_T_11[13:2]; // @[RoundAnyRawFNToRecFN.scala:180:{30,43}] wire _roundedSig_T_13 = roundingMode_odd & anyRound; // @[RoundAnyRawFNToRecFN.scala:95:53, :166:36, :181:42] wire [12:0] _roundedSig_T_15 = {12'h0, _roundedSig_T_13}; // @[RoundAnyRawFNToRecFN.scala:181:{24,42}] wire [12:0] _roundedSig_T_16 = {1'h0, _roundedSig_T_12} | _roundedSig_T_15; // @[RoundAnyRawFNToRecFN.scala:180:{43,47}, :181:24] wire [12:0] roundedSig = roundIncr ? _roundedSig_T_9 : _roundedSig_T_16; // @[RoundAnyRawFNToRecFN.scala:170:31, :173:16, :174:57, :180:47] wire [1:0] _sRoundedExp_T = roundedSig[12:11]; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:54] wire [2:0] _sRoundedExp_T_1 = {1'h0, _sRoundedExp_T}; // @[RoundAnyRawFNToRecFN.scala:185:{54,76}] wire [10:0] sRoundedExp = {sAdjustedExp[9], sAdjustedExp} + {{8{_sRoundedExp_T_1[2]}}, _sRoundedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:110:24, :185:{40,76}] assign _common_expOut_T = sRoundedExp[5:0]; // @[RoundAnyRawFNToRecFN.scala:185:40, :187:37] assign common_expOut = _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:122:31, :187:37] wire [9:0] _common_fractOut_T = roundedSig[10:1]; // @[RoundAnyRawFNToRecFN.scala:173:16, :190:27] wire [9:0] _common_fractOut_T_1 = roundedSig[9:0]; // @[RoundAnyRawFNToRecFN.scala:173:16, :191:27] assign _common_fractOut_T_2 = _common_fractOut_T_1; // @[RoundAnyRawFNToRecFN.scala:189:16, :191:27] assign common_fractOut = _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:123:31, :189:16] wire [6:0] _common_overflow_T = sRoundedExp[10:4]; // @[RoundAnyRawFNToRecFN.scala:185:40, :196:30] assign _common_overflow_T_1 = $signed(_common_overflow_T) > 7'sh2; // @[RoundAnyRawFNToRecFN.scala:196:{30,50}] assign common_overflow = _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:124:37, :196:50] wire _unboundedRange_roundPosBit_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:45] wire _unboundedRange_anyRound_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:45, :205:44] wire _unboundedRange_roundPosBit_T_1 = adjustedSig[1]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:61] wire unboundedRange_roundPosBit = _unboundedRange_roundPosBit_T_1; // @[RoundAnyRawFNToRecFN.scala:203:{16,61}] wire [1:0] _unboundedRange_anyRound_T_2 = adjustedSig[1:0]; // 
@[RoundAnyRawFNToRecFN.scala:116:66, :205:63] wire _unboundedRange_anyRound_T_3 = |_unboundedRange_anyRound_T_2; // @[RoundAnyRawFNToRecFN.scala:205:{63,70}] wire unboundedRange_anyRound = _unboundedRange_anyRound_T_3; // @[RoundAnyRawFNToRecFN.scala:205:{49,70}] wire _unboundedRange_roundIncr_T_1 = _unboundedRange_roundIncr_T & unboundedRange_roundPosBit; // @[RoundAnyRawFNToRecFN.scala:203:16, :207:{38,67}] wire _unboundedRange_roundIncr_T_2 = roundMagUp & unboundedRange_anyRound; // @[RoundAnyRawFNToRecFN.scala:98:42, :205:49, :209:29] wire unboundedRange_roundIncr = _unboundedRange_roundIncr_T_1 | _unboundedRange_roundIncr_T_2; // @[RoundAnyRawFNToRecFN.scala:207:67, :208:46, :209:29] wire _roundCarry_T = roundedSig[12]; // @[RoundAnyRawFNToRecFN.scala:173:16, :212:27] wire _roundCarry_T_1 = roundedSig[11]; // @[RoundAnyRawFNToRecFN.scala:173:16, :213:27] wire roundCarry = _roundCarry_T_1; // @[RoundAnyRawFNToRecFN.scala:211:16, :213:27] assign common_inexact = _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:127:37, :230:49] wire _commonCase_T_3 = ~io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :237:64] wire commonCase = _commonCase_T_3; // @[RoundAnyRawFNToRecFN.scala:237:{61,64}] wire overflow = commonCase & common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37, :237:61, :238:32] wire _inexact_T = commonCase & common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37, :237:61, :240:43] wire inexact = overflow | _inexact_T; // @[RoundAnyRawFNToRecFN.scala:238:32, :240:{28,43}] wire overflow_roundMagUp = _overflow_roundMagUp_T | roundMagUp; // @[RoundAnyRawFNToRecFN.scala:98:42, :243:{32,60}] wire _pegMinNonzeroMagOut_T_1 = roundMagUp | roundingMode_odd; // @[RoundAnyRawFNToRecFN.scala:95:53, :98:42, :245:60] wire _pegMaxFiniteMagOut_T = ~overflow_roundMagUp; // @[RoundAnyRawFNToRecFN.scala:243:60, :246:42] wire pegMaxFiniteMagOut = overflow & _pegMaxFiniteMagOut_T; // @[RoundAnyRawFNToRecFN.scala:238:32, :246:{39,42}] wire _notNaN_isInfOut_T = overflow & overflow_roundMagUp; // @[RoundAnyRawFNToRecFN.scala:238:32, :243:60, :248:45] wire notNaN_isInfOut = _notNaN_isInfOut_T; // @[RoundAnyRawFNToRecFN.scala:248:{32,45}] wire [5:0] _expOut_T_1 = _expOut_T ? 6'h38 : 6'h0; // @[RoundAnyRawFNToRecFN.scala:253:{18,32}] wire [5:0] _expOut_T_2 = ~_expOut_T_1; // @[RoundAnyRawFNToRecFN.scala:253:{14,18}] wire [5:0] _expOut_T_3 = common_expOut & _expOut_T_2; // @[RoundAnyRawFNToRecFN.scala:122:31, :252:24, :253:14] wire [5:0] _expOut_T_7 = _expOut_T_3; // @[RoundAnyRawFNToRecFN.scala:252:24, :256:17] wire [5:0] _expOut_T_8 = {1'h0, pegMaxFiniteMagOut, 4'h0}; // @[RoundAnyRawFNToRecFN.scala:246:39, :261:18] wire [5:0] _expOut_T_9 = ~_expOut_T_8; // @[RoundAnyRawFNToRecFN.scala:261:{14,18}] wire [5:0] _expOut_T_10 = _expOut_T_7 & _expOut_T_9; // @[RoundAnyRawFNToRecFN.scala:256:17, :260:17, :261:14] wire [5:0] _expOut_T_11 = {2'h0, notNaN_isInfOut, 3'h0}; // @[RoundAnyRawFNToRecFN.scala:248:32, :265:18] wire [5:0] _expOut_T_12 = ~_expOut_T_11; // @[RoundAnyRawFNToRecFN.scala:265:{14,18}] wire [5:0] _expOut_T_13 = _expOut_T_10 & _expOut_T_12; // @[RoundAnyRawFNToRecFN.scala:260:17, :264:17, :265:14] wire [5:0] _expOut_T_15 = _expOut_T_13; // @[RoundAnyRawFNToRecFN.scala:264:17, :268:18] wire [5:0] _expOut_T_16 = pegMaxFiniteMagOut ? 6'h2F : 6'h0; // @[RoundAnyRawFNToRecFN.scala:246:39, :273:16] wire [5:0] _expOut_T_17 = _expOut_T_15 | _expOut_T_16; // @[RoundAnyRawFNToRecFN.scala:268:18, :272:15, :273:16] wire [5:0] _expOut_T_18 = notNaN_isInfOut ? 
6'h30 : 6'h0; // @[RoundAnyRawFNToRecFN.scala:248:32, :277:16] wire [5:0] _expOut_T_19 = _expOut_T_17 | _expOut_T_18; // @[RoundAnyRawFNToRecFN.scala:272:15, :276:15, :277:16] wire [5:0] expOut = _expOut_T_19; // @[RoundAnyRawFNToRecFN.scala:276:15, :277:73] wire _fractOut_T_1 = _fractOut_T; // @[RoundAnyRawFNToRecFN.scala:280:{22,38}] wire [9:0] _fractOut_T_3 = _fractOut_T_1 ? 10'h0 : common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31, :280:{12,38}] wire [9:0] _fractOut_T_4 = {10{pegMaxFiniteMagOut}}; // @[RoundAnyRawFNToRecFN.scala:246:39, :284:13] wire [9:0] fractOut = _fractOut_T_3 | _fractOut_T_4; // @[RoundAnyRawFNToRecFN.scala:280:12, :283:11, :284:13] wire [6:0] _io_out_T = {signOut, expOut}; // @[RoundAnyRawFNToRecFN.scala:250:22, :277:73, :286:23] assign _io_out_T_1 = {_io_out_T, fractOut}; // @[RoundAnyRawFNToRecFN.scala:283:11, :286:{23,33}] assign io_out_0 = _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:48:5, :286:33] wire [2:0] _io_exceptionFlags_T_1 = {2'h0, overflow}; // @[RoundAnyRawFNToRecFN.scala:238:32, :288:41] wire [3:0] _io_exceptionFlags_T_2 = {_io_exceptionFlags_T_1, 1'h0}; // @[RoundAnyRawFNToRecFN.scala:288:{41,53}] assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, inexact}; // @[RoundAnyRawFNToRecFN.scala:240:28, :288:{53,66}] assign io_exceptionFlags_0 = _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:66] assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5] assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5] endmodule
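For reference, the five exception flags produced by this module are packed MSB-to-LSB as {invalid, infinite, overflow, underflow, inexact}, matching io.exceptionFlags := io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact in the Chisel. A hedged sketch with an illustrative packFlags helper (in this specialization invalidExc, infiniteExc and underflow are constant zero):

object ExceptionFlagsRef {
  def packFlags(invalid: Boolean, infinite: Boolean, overflow: Boolean,
                underflow: Boolean, inexact: Boolean): Int = {
    def b(x: Boolean, pos: Int) = if (x) 1 << pos else 0
    b(invalid, 4) | b(infinite, 3) | b(overflow, 2) | b(underflow, 1) | b(inexact, 0)
  }

  def main(args: Array[String]): Unit = {
    // Only overflow/underflow/inexact can ever be reported by the module above.
    println(packFlags(false, false, true, false, true).toBinaryString) // "101"
  }
}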
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
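A small software model of GrayCounter's next-state and output function may clarify the widx logic in the Verilog that follows, where the depth-1 queue degenerates to a 1-bit counter whose Gray output equals its binary value. This is a sketch; GrayCounterRef and grayNext are illustrative names.

object GrayCounterRef {
  // Returns (nextBinary, grayOutput) for a `bits`-wide counter, mirroring
  // GrayCounter: incremented = clear ? 0 : binary + increment; gray = incremented ^ (incremented >> 1).
  def grayNext(bits: Int, binary: Int, increment: Boolean, clear: Boolean): (Int, Int) = {
    val mask        = (1 << bits) - 1
    val incremented = if (clear) 0 else (binary + (if (increment) 1 else 0)) & mask
    (incremented, incremented ^ (incremented >> 1))
  }

  def main(args: Array[String]): Unit = {
    // depth == 1 gives a 1-bit counter: Gray output equals the binary value, which is
    // why the emitted widx logic below is just an add with a clear term.
    println(grayNext(bits = 1, binary = 0, increment = true, clear = false)) // (1,1)
    // 3-bit example: successive Gray codes differ in exactly one bit.
    println((0 until 8).map(b => grayNext(3, b, increment = true, clear = false)._2)) // Vector(1, 3, 2, 6, 7, 5, 4, 0)
  }
}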
module AsyncQueueSource_DebugInternalBundle( // @[AsyncQueue.scala:70:7] input clock, // @[AsyncQueue.scala:70:7] input reset, // @[AsyncQueue.scala:70:7] output io_enq_ready, // @[AsyncQueue.scala:73:14] input io_enq_valid, // @[AsyncQueue.scala:73:14] input io_enq_bits_resumereq, // @[AsyncQueue.scala:73:14] input [9:0] io_enq_bits_hartsel, // @[AsyncQueue.scala:73:14] input io_enq_bits_ackhavereset, // @[AsyncQueue.scala:73:14] input io_enq_bits_hasel, // @[AsyncQueue.scala:73:14] input io_enq_bits_hamask_0, // @[AsyncQueue.scala:73:14] input io_enq_bits_hamask_1, // @[AsyncQueue.scala:73:14] input io_enq_bits_hamask_2, // @[AsyncQueue.scala:73:14] input io_enq_bits_hamask_3, // @[AsyncQueue.scala:73:14] input io_enq_bits_hamask_4, // @[AsyncQueue.scala:73:14] input io_enq_bits_hamask_5, // @[AsyncQueue.scala:73:14] input io_enq_bits_hamask_6, // @[AsyncQueue.scala:73:14] input io_enq_bits_hamask_7, // @[AsyncQueue.scala:73:14] input io_enq_bits_hamask_8, // @[AsyncQueue.scala:73:14] input io_enq_bits_hamask_9, // @[AsyncQueue.scala:73:14] input io_enq_bits_hamask_10, // @[AsyncQueue.scala:73:14] input io_enq_bits_hamask_11, // @[AsyncQueue.scala:73:14] input io_enq_bits_hrmask_0, // @[AsyncQueue.scala:73:14] input io_enq_bits_hrmask_1, // @[AsyncQueue.scala:73:14] input io_enq_bits_hrmask_2, // @[AsyncQueue.scala:73:14] input io_enq_bits_hrmask_3, // @[AsyncQueue.scala:73:14] input io_enq_bits_hrmask_4, // @[AsyncQueue.scala:73:14] input io_enq_bits_hrmask_5, // @[AsyncQueue.scala:73:14] input io_enq_bits_hrmask_6, // @[AsyncQueue.scala:73:14] input io_enq_bits_hrmask_7, // @[AsyncQueue.scala:73:14] input io_enq_bits_hrmask_8, // @[AsyncQueue.scala:73:14] input io_enq_bits_hrmask_9, // @[AsyncQueue.scala:73:14] input io_enq_bits_hrmask_10, // @[AsyncQueue.scala:73:14] input io_enq_bits_hrmask_11, // @[AsyncQueue.scala:73:14] output io_async_mem_0_resumereq, // @[AsyncQueue.scala:73:14] output [9:0] io_async_mem_0_hartsel, // @[AsyncQueue.scala:73:14] output io_async_mem_0_ackhavereset, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hasel, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hamask_0, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hamask_1, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hamask_2, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hamask_3, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hamask_4, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hamask_5, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hamask_6, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hamask_7, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hamask_8, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hamask_9, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hamask_10, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hamask_11, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hrmask_0, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hrmask_1, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hrmask_2, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hrmask_3, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hrmask_4, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hrmask_5, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hrmask_6, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hrmask_7, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hrmask_8, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hrmask_9, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hrmask_10, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hrmask_11, // 
@[AsyncQueue.scala:73:14] input io_async_ridx, // @[AsyncQueue.scala:73:14] output io_async_widx, // @[AsyncQueue.scala:73:14] input io_async_safe_ridx_valid, // @[AsyncQueue.scala:73:14] output io_async_safe_widx_valid, // @[AsyncQueue.scala:73:14] output io_async_safe_source_reset_n, // @[AsyncQueue.scala:73:14] input io_async_safe_sink_reset_n // @[AsyncQueue.scala:73:14] ); wire io_enq_ready_0; // @[AsyncQueue.scala:91:29] wire _sink_valid_io_out; // @[AsyncQueue.scala:106:30] wire _sink_extend_io_out; // @[AsyncQueue.scala:105:30] wire _source_valid_0_io_out; // @[AsyncQueue.scala:102:32] wire _ridx_ridx_gray_io_q; // @[ShiftReg.scala:45:23] reg mem_0_resumereq; // @[AsyncQueue.scala:82:16] reg [9:0] mem_0_hartsel; // @[AsyncQueue.scala:82:16] reg mem_0_ackhavereset; // @[AsyncQueue.scala:82:16] reg mem_0_hasel; // @[AsyncQueue.scala:82:16] reg mem_0_hamask_0; // @[AsyncQueue.scala:82:16] reg mem_0_hamask_1; // @[AsyncQueue.scala:82:16] reg mem_0_hamask_2; // @[AsyncQueue.scala:82:16] reg mem_0_hamask_3; // @[AsyncQueue.scala:82:16] reg mem_0_hamask_4; // @[AsyncQueue.scala:82:16] reg mem_0_hamask_5; // @[AsyncQueue.scala:82:16] reg mem_0_hamask_6; // @[AsyncQueue.scala:82:16] reg mem_0_hamask_7; // @[AsyncQueue.scala:82:16] reg mem_0_hamask_8; // @[AsyncQueue.scala:82:16] reg mem_0_hamask_9; // @[AsyncQueue.scala:82:16] reg mem_0_hamask_10; // @[AsyncQueue.scala:82:16] reg mem_0_hamask_11; // @[AsyncQueue.scala:82:16] reg mem_0_hrmask_0; // @[AsyncQueue.scala:82:16] reg mem_0_hrmask_1; // @[AsyncQueue.scala:82:16] reg mem_0_hrmask_2; // @[AsyncQueue.scala:82:16] reg mem_0_hrmask_3; // @[AsyncQueue.scala:82:16] reg mem_0_hrmask_4; // @[AsyncQueue.scala:82:16] reg mem_0_hrmask_5; // @[AsyncQueue.scala:82:16] reg mem_0_hrmask_6; // @[AsyncQueue.scala:82:16] reg mem_0_hrmask_7; // @[AsyncQueue.scala:82:16] reg mem_0_hrmask_8; // @[AsyncQueue.scala:82:16] reg mem_0_hrmask_9; // @[AsyncQueue.scala:82:16] reg mem_0_hrmask_10; // @[AsyncQueue.scala:82:16] reg mem_0_hrmask_11; // @[AsyncQueue.scala:82:16] wire _widx_T_1 = io_enq_ready_0 & io_enq_valid; // @[Decoupled.scala:51:35] reg widx_widx_bin; // @[AsyncQueue.scala:52:25] reg ready_reg; // @[AsyncQueue.scala:90:56] assign io_enq_ready_0 = ready_reg & _sink_valid_io_out; // @[AsyncQueue.scala:90:56, :91:29, :106:30] reg widx_gray; // @[AsyncQueue.scala:93:55] always @(posedge clock) begin // @[AsyncQueue.scala:70:7] if (_widx_T_1) begin // @[Decoupled.scala:51:35] mem_0_resumereq <= io_enq_bits_resumereq; // @[AsyncQueue.scala:82:16] mem_0_hartsel <= io_enq_bits_hartsel; // @[AsyncQueue.scala:82:16] mem_0_ackhavereset <= io_enq_bits_ackhavereset; // @[AsyncQueue.scala:82:16] mem_0_hasel <= io_enq_bits_hasel; // @[AsyncQueue.scala:82:16] mem_0_hamask_0 <= io_enq_bits_hamask_0; // @[AsyncQueue.scala:82:16] mem_0_hamask_1 <= io_enq_bits_hamask_1; // @[AsyncQueue.scala:82:16] mem_0_hamask_2 <= io_enq_bits_hamask_2; // @[AsyncQueue.scala:82:16] mem_0_hamask_3 <= io_enq_bits_hamask_3; // @[AsyncQueue.scala:82:16] mem_0_hamask_4 <= io_enq_bits_hamask_4; // @[AsyncQueue.scala:82:16] mem_0_hamask_5 <= io_enq_bits_hamask_5; // @[AsyncQueue.scala:82:16] mem_0_hamask_6 <= io_enq_bits_hamask_6; // @[AsyncQueue.scala:82:16] mem_0_hamask_7 <= io_enq_bits_hamask_7; // @[AsyncQueue.scala:82:16] mem_0_hamask_8 <= io_enq_bits_hamask_8; // @[AsyncQueue.scala:82:16] mem_0_hamask_9 <= io_enq_bits_hamask_9; // @[AsyncQueue.scala:82:16] mem_0_hamask_10 <= io_enq_bits_hamask_10; // @[AsyncQueue.scala:82:16] mem_0_hamask_11 <= io_enq_bits_hamask_11; // 
@[AsyncQueue.scala:82:16]
      mem_0_hrmask_0 <= io_enq_bits_hrmask_0; // @[AsyncQueue.scala:82:16]
      mem_0_hrmask_1 <= io_enq_bits_hrmask_1; // @[AsyncQueue.scala:82:16]
      mem_0_hrmask_2 <= io_enq_bits_hrmask_2; // @[AsyncQueue.scala:82:16]
      mem_0_hrmask_3 <= io_enq_bits_hrmask_3; // @[AsyncQueue.scala:82:16]
      mem_0_hrmask_4 <= io_enq_bits_hrmask_4; // @[AsyncQueue.scala:82:16]
      mem_0_hrmask_5 <= io_enq_bits_hrmask_5; // @[AsyncQueue.scala:82:16]
      mem_0_hrmask_6 <= io_enq_bits_hrmask_6; // @[AsyncQueue.scala:82:16]
      mem_0_hrmask_7 <= io_enq_bits_hrmask_7; // @[AsyncQueue.scala:82:16]
      mem_0_hrmask_8 <= io_enq_bits_hrmask_8; // @[AsyncQueue.scala:82:16]
      mem_0_hrmask_9 <= io_enq_bits_hrmask_9; // @[AsyncQueue.scala:82:16]
      mem_0_hrmask_10 <= io_enq_bits_hrmask_10; // @[AsyncQueue.scala:82:16]
      mem_0_hrmask_11 <= io_enq_bits_hrmask_11; // @[AsyncQueue.scala:82:16]
    end
  end // always @(posedge)
  wire widx = _sink_valid_io_out & widx_widx_bin + _widx_T_1; // @[Decoupled.scala:51:35]
  always @(posedge clock or posedge reset) begin // @[AsyncQueue.scala:70:7]
    if (reset) begin // @[AsyncQueue.scala:70:7]
      widx_widx_bin <= 1'h0; // @[AsyncQueue.scala:52:25, :70:7]
      ready_reg <= 1'h0; // @[AsyncQueue.scala:70:7, :90:56]
      widx_gray <= 1'h0; // @[AsyncQueue.scala:70:7, :93:55]
    end
    else begin // @[AsyncQueue.scala:70:7]
      widx_widx_bin <= widx; // @[AsyncQueue.scala:52:25, :53:23]
      ready_reg <= _sink_valid_io_out & widx != ~_ridx_ridx_gray_io_q; // @[ShiftReg.scala:45:23]
      widx_gray <= widx; // @[AsyncQueue.scala:53:23, :93:55]
    end
  end // always @(posedge, posedge)
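For readers following the generated code above: the source side of this 1-entry AsyncQueue captures io_enq_bits into mem_0 on an accepted enqueue, advances a 1-bit gray-coded write index, and recomputes io_enq_ready by comparing that index against the read index synchronized in from the sink domain. The Chisel sketch below illustrates the same pointer/ready scheme in isolation; it is a simplified illustration only (names such as AsyncFifoSourceSketch and ridxSync are made up here, and the reset-safety handshake carried by io_async_safe_* is omitted), not the rocket-chip AsyncQueue source itself.

// Minimal sketch (illustrative only) of a 1-entry async-FIFO source:
// capture bits on an accepted enqueue, advance a 1-bit gray write index,
// and derive ready by comparing against the synchronized read index.
import chisel3._
import chisel3.util._

class AsyncFifoSourceSketch[T <: Data](gen: T) extends Module {
  val io = IO(new Bundle {
    val enq      = Flipped(Decoupled(gen))
    val mem0     = Output(gen)    // the single storage slot
    val widx     = Output(Bool()) // 1-bit gray write index (== binary for 1 bit)
    val ridxSync = Input(Bool())  // read index, already synchronized into this domain
  })

  val mem  = Reg(gen)
  val wbin = RegInit(false.B)          // 1-bit binary write index
  val winc = io.enq.fire               // advance only on an accepted beat
  val wnxt = wbin ^ winc               // 1-bit add: wbin + winc (mod 2)

  when (io.enq.fire) { mem := io.enq.bits } // capture the enqueued data
  wbin := wnxt

  // With one entry, the queue is full exactly when the next write index equals
  // the inverted (synchronized) read index; register the comparison, as the
  // ready_reg flop does in the Verilog above.
  io.enq.ready := RegNext(wnxt =/= !io.ridxSync, false.B)

  io.mem0 := mem
  io.widx := wbin
}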
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_165( // @[SynchronizerReg.scala:68:19]
  input  clock, // @[SynchronizerReg.scala:68:19]
  input  reset, // @[SynchronizerReg.scala:68:19]
  input  io_d, // @[ShiftReg.scala:36:14]
  output io_q // @[ShiftReg.scala:36:14]
);

  wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
  wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
  wire io_q_0; // @[SynchronizerReg.scala:68:19]
  reg  sync_0; // @[SynchronizerReg.scala:51:87]
  assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
  reg  sync_1; // @[SynchronizerReg.scala:51:87]
  reg  sync_2; // @[SynchronizerReg.scala:51:87]
  always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
    if (reset) begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
    end
    else begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
      sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
      sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
    end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule
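The module above is what the SynchronizerPrimitiveShiftReg generator in SynchronizerReg.scala elaborates to for the Async reset type with sync = 3 and init = false (hence the d3_i0 suffix in the module name): three flops in series with an asynchronous reset to 0, where sync_2 samples the asynchronous input and sync_0 drives io_q. The following is a rough Chisel sketch of that same structure, illustrative only; ThreeFlopSyncSketch is a made-up name, and in the sources above one would normally just call AsyncResetSynchronizerShiftReg(in, 3) rather than write this by hand.

// Illustrative sketch of a 3-deep, asynchronously reset synchronizer chain
// (not the generator above): sync_2 samples the input, sync_0 drives the output.
import chisel3._

class ThreeFlopSyncSketch extends Module with RequireAsyncReset {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Registers reset asynchronously to 0, mirroring the posedge-reset always block above.
  val sync = RegInit(VecInit(Seq.fill(3)(false.B)))
  sync(2) := io.d
  sync(1) := sync(2)
  sync(0) := sync(1)
  io.q := sync(0)
}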
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
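// A minimal usage sketch for DecoupledHelper, with hypothetical signals `in` (a
// DecoupledIO being consumed), `out` (a DecoupledIO being produced) and a spare
// condition `tokenAvailable`. Each participant excludes its own term from fire()
// so that its ready/valid never depends combinationally on itself.
val helper = DecoupledHelper(in.valid, out.ready, tokenAvailable)
in.ready  := helper.fire(in.valid)     // all terms except in.valid
out.valid := helper.fire(out.ready)    // all terms except out.ready
val doWork = helper.fire()             // every term true, e.g. to bump a counter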
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
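// A small usage sketch for MaskGen above (illustrative values): with an 8-byte beat,
// a 2-byte access at byte offset 0x2 selects byte lanes 3:2.
val wmask = MaskGen(addr_lo = 0x2.U, lgSize = 1.U, beatBytes = 8)  // => "b00001100".U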
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
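// A small sketch of the UInt helpers above (illustrative values), assuming
// freechips.rocketchip.util._ is imported so the implicit classes are in scope:
val x = "b1010".U(4.W)
val sext = x.sextTo(8)      // "b11111010".U : replicate the MSB
val zext = x.padTo(8)       // "b00001010".U : zero-extend
val none = x.extract(3, 4)  // 0.U           : zero-width extracts return 0.U instead of erroring
val anot = x.andNot(2.U)    // "b1000".U     : x & ~y after zero-extending y to x's width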
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
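// A small sketch of the bit-smearing helpers above (illustrative values):
val up   = leftOR("b00100".U(5.W))   // "b11100".U : each set bit is smeared toward the MSB
val down = rightOR("b00100".U(5.W))  // "b00111".U : each set bit is smeared toward the LSB
val oh1  = UIntToOH1(3.U, 8)         // "b00000111".U : (1 << x) - 1, clamped to 8 bits
val back = OH1ToUInt("b0111".U)      // 3.U : recovers x from a well-formed OH1 mask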
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
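// A minimal sketch of using the opcode and permission tables above, with hypothetical
// bundles `a` (TLBundleA) and `d` (TLBundleD) taken from the same edge:
val expected_d = TLMessages.adResponse(a.opcode)   // canonical response, e.g. Get -> AccessAckData
val is_acquire = a.opcode.isOneOf(TLMessages.AcquireBlock, TLMessages.AcquirePerm)
val wants_T    = is_acquire && a.param.isOneOf(TLPermissions.NtoT, TLPermissions.BtoT)
val grow_legal = !is_acquire || TLPermissions.isGrow(a.param)  // Acquire params must be NtoB/NtoT/BtoT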
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
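// A monitor-style sketch of the `denied` comment above ("implies corrupt iff *Data"),
// assuming `d` is an observed Decoupled TLBundleD: a denied data-carrying beat must
// also be flagged corrupt.
when (d.valid && d.bits.denied && d.bits.opcode(0)) {  // D opcodes with data have bit 0 set
  assert(d.bits.corrupt, "denied D-channel data beat must also be marked corrupt")
}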
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers 
// [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int) {
  def this(x: Int) = this(x, x)

  require (min <= max, s"Min transfer $min > max transfer $max")
  require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
  require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
  require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
  require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")

  def none = min == 0
  def contains(x: Int) = isPow2(x) && min <= x && x <= max
  def containsLg(x: Int) = contains(1 << x)
  def containsLg(x: UInt) =
    if (none) false.B
    else if (min == max) { log2Ceil(min).U === x }
    else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }

  def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)

  def intersect(x: TransferSizes) =
    if (x.max < min || max < x.min) TransferSizes.none
    else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))

  // Not a union, because the result may contain sizes contained by neither term
  // NOT TO BE CONFUSED WITH COVERPOINTS
  def mincover(x: TransferSizes) = {
    if (none) { x }
    else if (x.none) { this }
    else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) }
  }

  override def toString() = "TransferSizes[%d, %d]".format(min, max)
}

object TransferSizes {
  def apply(x: Int) = new TransferSizes(x)
  val none = new TransferSizes(0)

  def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
  def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)

  implicit def asBool(x: TransferSizes) = !x.none
}

// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
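// A small Scala-level (elaboration-time) sketch of the parameter types above,
// with illustrative values:
val xfer = TransferSizes(4, 64)           // power-of-2 transfer sizes from 4 to 64 bytes
xfer.contains(16)                         // true; xfer.contains(3) is false (not a power of 2)
val ids = IdRange(4, 8)                   // half-open: source IDs 4,5,6,7
ids.contains(7)                           // true; ids.contains(8) is false
val dev = AddressSet(0x10000, 0xfff)      // the 4 KiB region [0x10000, 0x10fff]
dev.contains(0x10a00)                     // true (an overload also exists for a hardware UInt)
dev.widen(0xf000)                         // AddressSet(0x10000, 0xffff): ignore address bits 15:12
AddressSet.unify(Seq(AddressSet(0x0, 0xff), AddressSet(0x100, 0xff)))
                                          // => Seq(AddressSet(0x0, 0x1ff))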
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
        val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
        // Do there exist A messages without Data?
        val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
      }
      case _:TLBundleB => {
        // Do there exist B messages with Data?
        val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
        // Do there exist B messages without Data?
        val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
      }
      case _:TLBundleC => {
        // Do there exist C messages with Data?
        val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
        // Do there exist C messages without Data?
        val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
        if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
      }
      case _:TLBundleD => {
        // Do there exist D messages with Data?
        val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
        // Do there exist D messages without Data?
        val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
        if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
      }
      case _:TLBundleE => Some(false)
    }
  }

  def isRequest(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => true.B
      case b: TLBundleB => true.B
      case c: TLBundleC => c.opcode(2) && c.opcode(1)
        //    opcode === TLMessages.Release ||
        //    opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(2) && !d.opcode(1)
        //    opcode === TLMessages.Grant ||
        //    opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
  }

  def isResponse(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => false.B
      case b: TLBundleB => false.B
      case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
        //    opcode =/= TLMessages.Release &&
        //    opcode =/= TLMessages.ReleaseData
      case d: TLBundleD => true.B // Grant isResponse + isRequest
      case e: TLBundleE => true.B
    }
  }

  def hasData(x: TLChannel): Bool = {
    val opdata = x match {
      case a: TLBundleA => !a.opcode(2)
        //    opcode === TLMessages.PutFullData ||
        //    opcode === TLMessages.PutPartialData ||
        //    opcode === TLMessages.ArithmeticData ||
        //    opcode === TLMessages.LogicalData
      case b: TLBundleB => !b.opcode(2)
        //    opcode === TLMessages.PutFullData ||
        //    opcode === TLMessages.PutPartialData ||
        //    opcode === TLMessages.ArithmeticData ||
        //    opcode === TLMessages.LogicalData
      case c: TLBundleC => c.opcode(0)
        //    opcode === TLMessages.AccessAckData ||
        //    opcode === TLMessages.ProbeAckData ||
        //    opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(0)
        //    opcode === TLMessages.AccessAckData ||
        //    opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
    staticHasData(x).map(_.B).getOrElse(opdata)
  }

  def opcode(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.opcode
      case b: TLBundleB => b.opcode
      case c: TLBundleC => c.opcode
      case d: TLBundleD => d.opcode
    }
  }

  def param(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.param
      case b: TLBundleB => b.param
      case c: TLBundleC => c.param
      case d: TLBundleD =>
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
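// A usage sketch for the opcode-bit and beat-tracking helpers above, assuming `out`
// is a master-side TLBundle port and `edge` its TLEdge:
val a_has_data = !out.a.bits.opcode(2)          // Put*/Atomics carry data; Get/Hint/Acquire* do not
val d_has_data =  out.d.bits.opcode(0)          // AccessAckData / GrantData
val (a_first, a_last, a_done, a_beat) = edge.count(out.a)
when (out.a.fire && a_first) { /* capture per-transaction state on the first beat */ }
when (a_done)                { /* the final beat was accepted this cycle */ }
// a_beat counts beats within a burst; edge.addr_inc(out.a) returns the byte offset instead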
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
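// A usage sketch for inFlight, assuming `tl` is the port's TLBundle; per the comment
// above this builds an expensive counter, so reserve it for things like power-down gating.
val (in_flight, next_in_flight) = edge.inFlight(tl)
val quiescent = next_in_flight === 0.U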
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
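Example usage (illustrative only; this module is not part of the library sources above). In practice the edge and the master-side TLBundle come from a diplomatic node inside a LazyModuleImp (for example the bundle/edge pair returned by node.out(0)); here the edge is passed in directly, and the module name, address, and transfer size are assumptions made for the sketch. It issues one 32-byte Get built with edge.Get and uses edge.firstlast to detect the final beat of the AccessAckData burst.

import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink._

// Hypothetical example module; `edge` would typically come from node.out(0) in a LazyModuleImp.
class GetOnce(edge: TLEdgeOut) extends Module {
  val io = IO(new Bundle {
    val tl    = new TLBundle(edge.bundle)           // master-facing TileLink port
    val start = Input(Bool())
    val data  = Valid(UInt(edge.bundle.dataBits.W)) // one beat of response data
  })

  // Build a (legal, bits) pair for a 32-byte Get (lgSize = 5) from source 0 at a made-up address.
  val (legal, get) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 5.U)

  val busy = RegInit(false.B)
  io.tl.a.valid := io.start && !busy && legal
  io.tl.a.bits  := get
  when (io.tl.a.fire) { busy := true.B }

  // firstlast uses the same beat counter (firstlastHelper) that the monitor elaborates;
  // `done` is high on the cycle the last beat of the D-channel response fires.
  val (_, _, d_done) = edge.firstlast(io.tl.d)
  io.tl.d.ready := true.B
  when (d_done) { busy := false.B }

  io.data.valid := io.tl.d.fire && io.tl.d.bits.opcode === TLMessages.AccessAckData
  io.data.bits  := io.tl.d.bits.data

  // Unused channels for an uncached master.
  io.tl.b.ready := true.B
  io.tl.c.valid := false.B
  io.tl.c.bits  := DontCare
  io.tl.e.valid := false.B
  io.tl.e.bits  := DontCare
}

The legal bit comes from manager.supportsGetFast, so gating a.valid on it keeps the request within what diplomacy negotiated; the generated monitor below checks the corresponding A-channel legality conditions at simulation time.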
module TLMonitor_5( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [7:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [28:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [7:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire 
_c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 
1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59] wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27] wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25] wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] 
_c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // 
@[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_29 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_31 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_35 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_37 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_53 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_55 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_59 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_61 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_65 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_67 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_71 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_73 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_79 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_81 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_85 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_87 = 1'h1; // @[Parameters.scala:57:20] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28] wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] 
_c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [28:0] _c_first_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_first_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_first_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_first_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_set_wo_ready_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_set_wo_ready_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_opcodes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_opcodes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_sizes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_sizes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_opcodes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_opcodes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_sizes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_sizes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_probe_ack_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_probe_ack_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_probe_ack_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_probe_ack_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] 
_same_cycle_resp_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [7:0] _c_first_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_first_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_first_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_first_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_set_wo_ready_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_set_wo_ready_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_opcodes_set_interm_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_opcodes_set_interm_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_sizes_set_interm_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_sizes_set_interm_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_opcodes_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_opcodes_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_sizes_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_sizes_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_probe_ack_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_probe_ack_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_probe_ack_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_probe_ack_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _same_cycle_resp_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _same_cycle_resp_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _same_cycle_resp_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _same_cycle_resp_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _same_cycle_resp_WIRE_4_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _same_cycle_resp_WIRE_5_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // 
@[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [2050:0] _c_opcodes_set_T_1 = 2051'h0; // @[Monitor.scala:767:54] wire [2050:0] _c_sizes_set_T_1 = 2051'h0; // @[Monitor.scala:768:52] wire [10:0] _c_opcodes_set_T = 11'h0; // @[Monitor.scala:767:79] wire [10:0] _c_sizes_set_T = 11'h0; // @[Monitor.scala:768:77] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51] wire [255:0] _c_set_wo_ready_T = 256'h1; // @[OneHot.scala:58:35] wire [255:0] _c_set_T = 256'h1; // @[OneHot.scala:58:35] wire [515:0] c_opcodes_set = 516'h0; // @[Monitor.scala:740:34] wire [515:0] c_sizes_set = 516'h0; // @[Monitor.scala:741:34] wire [128:0] c_set = 129'h0; // @[Monitor.scala:738:34] wire [128:0] c_set_wo_ready = 129'h0; // @[Monitor.scala:739:34] wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46] wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76] wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [7:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire 
[7:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_44 = 
io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_55 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_56 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_57 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_58 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_59 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_60 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_61 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_62 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_63 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_64 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _uncommonBits_T_65 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_10 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [7:0] _source_ok_uncommonBits_T_11 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_T = io_in_a_bits_source_0 == 8'h50; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [5:0] _source_ok_T_1 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7] wire [5:0] _source_ok_T_7 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7] wire [5:0] _source_ok_T_13 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7] wire [5:0] _source_ok_T_19 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7] wire _source_ok_T_2 = _source_ok_T_1 == 6'h10; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_8 = _source_ok_T_7 == 6'h11; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31] wire 
[1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_14 = _source_ok_T_13 == 6'h12; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_20 = _source_ok_T_19 == 6'h13; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire _source_ok_T_25 = io_in_a_bits_source_0 == 8'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31] wire _source_ok_T_26 = io_in_a_bits_source_0 == 8'h21; // @[Monitor.scala:36:7] wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31] wire [3:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[3:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_27 = io_in_a_bits_source_0[7:4]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_33 = io_in_a_bits_source_0[7:4]; // @[Monitor.scala:36:7] wire _source_ok_T_28 = _source_ok_T_27 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_30 = _source_ok_T_28; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_32 = _source_ok_T_30; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_7 = _source_ok_T_32; // @[Parameters.scala:1138:31] wire [3:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[3:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_34 = _source_ok_T_33 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_36 = _source_ok_T_34; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_38 = _source_ok_T_36; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_8 = _source_ok_T_38; // @[Parameters.scala:1138:31] wire _source_ok_T_39 = io_in_a_bits_source_0 == 8'h22; // @[Monitor.scala:36:7] wire _source_ok_WIRE_9 = _source_ok_T_39; // @[Parameters.scala:1138:31] wire _source_ok_T_40 = io_in_a_bits_source_0 == 8'h80; // @[Monitor.scala:36:7] wire _source_ok_WIRE_10 = _source_ok_T_40; // @[Parameters.scala:1138:31] wire _source_ok_T_41 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_42 = _source_ok_T_41 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_43 = _source_ok_T_42 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_44 = _source_ok_T_43 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_45 = _source_ok_T_44 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_46 = _source_ok_T_45 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_47 = _source_ok_T_46 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_48 = _source_ok_T_47 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_49 = _source_ok_T_48 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_49 | _source_ok_WIRE_10; // @[Parameters.scala:1138:31, :1139:46] wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T; // 
@[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [28:0] _is_aligned_T = {23'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 29'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // 
@[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_4 = _uncommonBits_T_4[3:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_5 = _uncommonBits_T_5[3:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_10 = _uncommonBits_T_10[3:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_11 = _uncommonBits_T_11[3:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_14 = 
_uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_16 = _uncommonBits_T_16[3:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_17 = _uncommonBits_T_17[3:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_22 = _uncommonBits_T_22[3:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_23 = _uncommonBits_T_23[3:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_28 = _uncommonBits_T_28[3:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_29 = _uncommonBits_T_29[3:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_34 = _uncommonBits_T_34[3:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_35 = _uncommonBits_T_35[3:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_40 = _uncommonBits_T_40[3:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_41 = _uncommonBits_T_41[3:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_46 = _uncommonBits_T_46[3:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_47 = _uncommonBits_T_47[3:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_49 = _uncommonBits_T_49[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_52 = _uncommonBits_T_52[3:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_53 = _uncommonBits_T_53[3:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_54 = _uncommonBits_T_54[1:0]; // 
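// Source-ID legality for the D channel (source_ok_1) begins below, checking the same source-ID ranges as the A-channel source_ok, but against io_in_d_bits_source.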
@[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_55 = _uncommonBits_T_55[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_56 = _uncommonBits_T_56[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_57 = _uncommonBits_T_57[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_58 = _uncommonBits_T_58[3:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_59 = _uncommonBits_T_59[3:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_60 = _uncommonBits_T_60[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_61 = _uncommonBits_T_61[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_62 = _uncommonBits_T_62[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_63 = _uncommonBits_T_63[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_64 = _uncommonBits_T_64[3:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] uncommonBits_65 = _uncommonBits_T_65[3:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_50 = io_in_d_bits_source_0 == 8'h50; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_50; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [5:0] _source_ok_T_51 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7] wire [5:0] _source_ok_T_57 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7] wire [5:0] _source_ok_T_63 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7] wire [5:0] _source_ok_T_69 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7] wire _source_ok_T_52 = _source_ok_T_51 == 6'h10; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_54 = _source_ok_T_52; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_56 = _source_ok_T_54; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_56; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_58 = _source_ok_T_57 == 6'h11; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_60 = _source_ok_T_58; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_62 = _source_ok_T_60; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_62; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_64 = _source_ok_T_63 == 6'h12; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_66 = _source_ok_T_64; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_68 = _source_ok_T_66; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_68; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_70 = _source_ok_T_69 == 6'h13; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_72 = _source_ok_T_70; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_74 = _source_ok_T_72; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_4 = _source_ok_T_74; // @[Parameters.scala:1138:31] wire _source_ok_T_75 = io_in_d_bits_source_0 == 8'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_5 = _source_ok_T_75; // @[Parameters.scala:1138:31] wire _source_ok_T_76 = io_in_d_bits_source_0 == 8'h21; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_6 = _source_ok_T_76; // @[Parameters.scala:1138:31] wire [3:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[3:0]; // 
@[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_77 = io_in_d_bits_source_0[7:4]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_83 = io_in_d_bits_source_0[7:4]; // @[Monitor.scala:36:7] wire _source_ok_T_78 = _source_ok_T_77 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_80 = _source_ok_T_78; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_82 = _source_ok_T_80; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_7 = _source_ok_T_82; // @[Parameters.scala:1138:31] wire [3:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[3:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_84 = _source_ok_T_83 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_86 = _source_ok_T_84; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_88 = _source_ok_T_86; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_8 = _source_ok_T_88; // @[Parameters.scala:1138:31] wire _source_ok_T_89 = io_in_d_bits_source_0 == 8'h22; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_9 = _source_ok_T_89; // @[Parameters.scala:1138:31] wire _source_ok_T_90 = io_in_d_bits_source_0 == 8'h80; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_10 = _source_ok_T_90; // @[Parameters.scala:1138:31] wire _source_ok_T_91 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_92 = _source_ok_T_91 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_93 = _source_ok_T_92 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_94 = _source_ok_T_93 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_95 = _source_ok_T_94 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_96 = _source_ok_T_95 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_97 = _source_ok_T_96 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_98 = _source_ok_T_97 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_99 = _source_ok_T_98 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_99 | _source_ok_WIRE_1_10; // @[Parameters.scala:1138:31, :1139:46] wire _T_1333 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1333; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1333; // @[Decoupled.scala:51:35] wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1 = a_first_beats1_opdata ? 
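// A-channel beat counter (Edges.scala:229-236) and the registers holding opcode/param/size/source/address for the multibeat-consistency asserts (Monitor.scala:387-391) follow.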
a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [7:0] source; // @[Monitor.scala:390:22] reg [28:0] address; // @[Monitor.scala:391:22] wire _T_1406 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1406; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1406; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1406; // @[Decoupled.scala:51:35] wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [2:0] d_first_beats1 = d_first_beats1_opdata ? 
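// D-channel beat counter, saved D-channel fields (Monitor.scala:538-543), and the inflight / inflight_opcodes / inflight_sizes registers that track outstanding A requests (Monitor.scala:614-618).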
d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [7:0] source_1; // @[Monitor.scala:541:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [128:0] inflight; // @[Monitor.scala:614:27] reg [515:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [515:0] inflight_sizes; // @[Monitor.scala:618:33] wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
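// Per-source bookkeeping wires: a_set marks a source busy on an accepted A beat, while the *_lookup shifts recover the opcode and size recorded for the source of the current D response (Monitor.scala:626-641).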
d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [128:0] a_set; // @[Monitor.scala:626:34] wire [128:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [515:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [515:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [10:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [10:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [10:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65] wire [10:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [10:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99] wire [10:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [10:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67] wire [10:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [10:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99] wire [515:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [515:0] _a_opcode_lookup_T_6 = {512'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [515:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[515:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [515:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [515:0] _a_size_lookup_T_6 = {512'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [515:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[515:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, 
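// One-hot set masks for accepted A requests and the matching d_clr masks for D responses; ReleaseAck responses are excluded from this clear path (d_release_ack, Monitor.scala:673).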
:684:44] wire [255:0] _GEN_2 = 256'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [255:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35] wire [255:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[128:0] : 129'h0; // @[OneHot.scala:58:35] wire _T_1259 = _T_1333 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1259 ? _a_set_T[128:0] : 129'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1259 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1259 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [10:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [10:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79] wire [10:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77] wire [2050:0] _a_opcodes_set_T_1 = {2047'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1259 ? _a_opcodes_set_T_1[515:0] : 516'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [2050:0] _a_sizes_set_T_1 = {2047'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_1259 ? _a_sizes_set_T_1[515:0] : 516'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [128:0] d_clr; // @[Monitor.scala:664:34] wire [128:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [515:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [515:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_1305 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [255:0] _GEN_5 = 256'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [255:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [255:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [255:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [255:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1305 & ~d_release_ack ? _d_clr_wo_ready_T[128:0] : 129'h0; // @[OneHot.scala:58:35] wire _T_1274 = _T_1406 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_1274 ? 
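// Clear masks for the opcode/size side tables, the inflight register update (set on A fire, clear on D fire), and the response-timeout watchdog (Monitor.scala:680-714).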
_d_clr_T[128:0] : 129'h0; // @[OneHot.scala:58:35] wire [2062:0] _d_opcodes_clr_T_5 = 2063'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1274 ? _d_opcodes_clr_T_5[515:0] : 516'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [2062:0] _d_sizes_clr_T_5 = 2063'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1274 ? _d_sizes_clr_T_5[515:0] : 516'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [128:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [128:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [128:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [515:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [515:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [515:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [515:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [515:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [515:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [128:0] inflight_1; // @[Monitor.scala:726:35] wire [128:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [515:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [515:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [515:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [515:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? 
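// A second inflight set (inflight_1) tracks Release/ReleaseAck: c_*_lookup recovers the recorded opcode/size and d_clr_1 clears the entry when a ReleaseAck is accepted (Monitor.scala:726-791).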
d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [515:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [515:0] _c_opcode_lookup_T_6 = {512'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [515:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[515:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [515:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [515:0] _c_size_lookup_T_6 = {512'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [515:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[515:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [128:0] d_clr_1; // @[Monitor.scala:774:34] wire [128:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [515:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [515:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1377 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1377 & d_release_ack_1 ? _d_clr_wo_ready_T_1[128:0] : 129'h0; // @[OneHot.scala:58:35] wire _T_1359 = _T_1406 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1359 ? _d_clr_T_1[128:0] : 129'h0; // @[OneHot.scala:58:35] wire [2062:0] _d_opcodes_clr_T_11 = 2063'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1359 ? _d_opcodes_clr_T_11[515:0] : 516'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [2062:0] _d_sizes_clr_T_11 = 2063'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1359 ? 
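// Final clear-mask selection, the update terms for inflight_1, and its own watchdog counter (Monitor.scala:814-818).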
_d_sizes_clr_T_11[515:0] : 516'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 8'h0; // @[Monitor.scala:36:7, :795:113] wire [128:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [128:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [515:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [515:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [515:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [515:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
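As a reading aid for the elaborated wires above (a_set, d_clr, inflight, watchdog and their *_1 copies): they are the flattened form of the monitor's per-source bookkeeping. The sketch below shows that pattern in Chisel. It is not the verbatim Monitor.scala code: trackInflight is a hypothetical helper name, the fragment assumes a bundle: TLBundle and an edge: TLEdge are in scope inside a Module, and it omits the opcode/size side tables (inflight_opcodes, inflight_sizes) and the PlusArg-configurable timeout assertion that the real monitor adds on top of the watchdog.

import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink._

// One busy bit per source ID: set on the first beat of an accepted A request,
// cleared on the first beat of the matching D response. ReleaseAck does not
// clear it here; that belongs to the separate release tracker (inflight_1 above).
def trackInflight(bundle: TLBundle, edge: TLEdge): Unit = {
  val inflight = RegInit(0.U(edge.client.endSourceId.W))
  val a_first  = edge.first(bundle.a.bits, bundle.a.fire)
  val d_first  = edge.first(bundle.d.bits, bundle.d.fire)
  val a_set    = WireDefault(0.U(edge.client.endSourceId.W))
  val d_clr    = WireDefault(0.U(edge.client.endSourceId.W))
  val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
  when (bundle.a.fire && a_first) { a_set := UIntToOH(bundle.a.bits.source, edge.client.endSourceId) }
  when (bundle.d.fire && d_first && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source, edge.client.endSourceId) }
  inflight := (inflight | a_set) & ~d_clr
  // Simplified watchdog: count cycles while any request is outstanding.
  val watchdog = RegInit(0.U(32.W))
  watchdog := Mux(inflight.orR, watchdog + 1.U, 0.U)
}

The a_set / d_clr one-hot wires and the (inflight | a_set) & ~d_clr update correspond directly to the _a_set_T, _d_clr_T and _inflight_T_* expressions in the generated Verilog above.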
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
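// Each when-block below checks one A-channel opcode: that the master may emit it, that the slave can accept it at this address and size, and that the source ID, alignment, param, and mask are legal, with corrupt additionally required to be clear on messages that carry no data.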
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
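// Worked example (a sketch of the packing above, not an extra check): with
// a_opcode_bus_size = 4 every source ID owns a 4-bit lane of inflight_opcodes at offset
// (source << 2). An accepted Get (opcode 4) from source 2 writes (4 << 1) | 1 = 0b1001
// into that lane; the trailing 1 marks the entry live and a_opcode_lookup recovers the
// opcode by shifting the lane right by one. The matching non-ReleaseAck 'D' beat clears
// the lane through d_opcodes_clr, mirroring the inflight update just above.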
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
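// Dynamic barrel rotate (illustrative note): each bit i of the padded amount
// conditionally applies a fixed rotateRight(1 << i), so a rotate by n = 5 composes
// rotateRight(1) and rotateRight(4), costing log2(width) mux stages instead of a
// full shifter per possible amount.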
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, 
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
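// (If only data-carrying or only data-free A messages are reachable on this edge,
// this lets hasData below fold to a constant instead of decoding the opcode.)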
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
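The monitor generated from these edge helpers (shown next) tracks multi-beat bursts with the down-counter built by firstlastHelper in Edges.scala above: the counter loads numBeats1 on the first accepted beat and decrements on each following one. As a minimal standalone sketch of that counting scheme only (the module name BeatTracker and its ports are invented for this illustration and are not part of rocket-chip):

import chisel3._
import chisel3.util._

class BeatTracker(maxBeats: Int) extends Module {
  private val w = log2Ceil(maxBeats max 2)
  val io = IO(new Bundle {
    val fire   = Input(Bool())        // a beat is accepted this cycle
    val beats1 = Input(UInt(w.W))     // total beats minus one for the current burst
    val first  = Output(Bool())
    val last   = Output(Bool())
    val done   = Output(Bool())
  })
  val counter  = RegInit(0.U(w.W))
  val counter1 = counter - 1.U
  io.first := counter === 0.U
  io.last  := counter === 1.U || io.beats1 === 0.U
  io.done  := io.last && io.fire
  when (io.fire) { counter := Mux(io.first, io.beats1, counter1) }
}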
module TLMonitor_16( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71] wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg [2:0] a_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [6:0] source; // @[Monitor.scala:390:22] reg [28:0] address; // @[Monitor.scala:391:22] reg [2:0] d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [6:0] source_1; // @[Monitor.scala:541:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [64:0] inflight; // @[Monitor.scala:614:27] reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [259:0] inflight_sizes; // @[Monitor.scala:618:33] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire [127:0] _GEN_0 = {121'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35] wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35] wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46] wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74] wire [127:0] _GEN_3 = {121'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [64:0] inflight_1; // @[Monitor.scala:726:35] reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
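The inflight, inflight_opcodes and inflight_sizes registers in the monitor above implement per-source bookkeeping: an accepted A request marks its source ID as outstanding and the matching D response clears it again. A rough, hedged sketch of that pattern, simplified to a single valid bit per source (the real monitor also records opcodes and sizes; the module name InflightTracker is invented here):

import chisel3._
import chisel3.util._

class InflightTracker(nSources: Int) extends Module {
  require(nSources >= 2)
  val io = IO(new Bundle {
    val aFire   = Input(Bool())
    val aSource = Input(UInt(log2Ceil(nSources).W))
    val dFire   = Input(Bool())
    val dSource = Input(UInt(log2Ceil(nSources).W))
  })
  val inflight = RegInit(0.U(nSources.W))
  val setMask  = Mux(io.aFire, UIntToOH(io.aSource, nSources), 0.U)
  val clrMask  = Mux(io.dFire, UIntToOH(io.dSource, nSources), 0.U)
  inflight := (inflight | setMask) & ~clrMask
  // A response must match a request that is outstanding (or accepted this cycle).
  assert(!io.dFire || inflight(io.dSource) || (io.aFire && io.aSource === io.dSource),
    "'D' channel acknowledged for nothing in flight")
}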
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
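A hypothetical use of the synchronizer wrapper defined above: bringing an asynchronous level signal (for example an interrupt pin) into the local clock domain through a 3-deep, asynchronously reset flop chain. Only AsyncResetSynchronizerShiftReg comes from the file above; the surrounding module is invented for this sketch:

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class IrqSync extends Module {
  val io = IO(new Bundle {
    val irqIn  = Input(Bool())   // asynchronous with respect to 'clock'
    val irqOut = Output(Bool())  // synchronized to 'clock'
  })
  io.irqOut := AsyncResetSynchronizerShiftReg(io.irqIn, sync = 3, init = 0, name = Some("irq_sync"))
}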
module AsyncResetSynchronizerShiftReg_w4_d3_i0_37( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input [3:0] io_d, // @[ShiftReg.scala:36:14] output [3:0] io_q // @[ShiftReg.scala:36:14] ); wire [3:0] io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_2 = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_4 = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_6 = reset; // @[SynchronizerReg.scala:86:21] wire [3:0] _io_q_T; // @[SynchronizerReg.scala:90:14] wire [3:0] io_q_0; // @[SynchronizerReg.scala:80:7] wire _output_T_1 = io_d_0[0]; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire _output_T_3 = io_d_0[1]; // @[SynchronizerReg.scala:80:7, :87:41] wire output_1; // @[ShiftReg.scala:48:24] wire _output_T_5 = io_d_0[2]; // @[SynchronizerReg.scala:80:7, :87:41] wire output_2; // @[ShiftReg.scala:48:24] wire _output_T_7 = io_d_0[3]; // @[SynchronizerReg.scala:80:7, :87:41] wire output_3; // @[ShiftReg.scala:48:24] wire [1:0] io_q_lo = {output_1, output_0}; // @[SynchronizerReg.scala:90:14] wire [1:0] io_q_hi = {output_3, output_2}; // @[SynchronizerReg.scala:90:14] assign _io_q_T = {io_q_hi, io_q_lo}; // @[SynchronizerReg.scala:90:14] assign io_q_0 = _io_q_T; // @[SynchronizerReg.scala:80:7, :90:14] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_341 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_342 output_chain_1 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T_2), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_3), // @[SynchronizerReg.scala:87:41] .io_q (output_1) ); // @[ShiftReg.scala:45:23] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_343 output_chain_2 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T_4), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_5), // @[SynchronizerReg.scala:87:41] .io_q (output_2) ); // @[ShiftReg.scala:45:23] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_344 output_chain_3 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T_6), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_7), // @[SynchronizerReg.scala:87:41] .io_q (output_3) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
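The generated module above shows the structure produced by Seq.tabulate(w) and io.q := Cat(output.reverse) in AsyncResetSynchronizerShiftReg: one 3-deep single-bit chain per data bit, reassembled so that chain i drives bit i of io_q (the {io_q_hi, io_q_lo} concatenation). A tiny standalone illustration of that bit ordering only (the module name CatReverseDemo is made up):

import chisel3._
import chisel3.util.Cat

class CatReverseDemo extends RawModule {
  val in  = IO(Input(Vec(4, Bool())))
  val out = IO(Output(UInt(4.W)))
  // Cat puts its first argument in the most significant position, so the
  // sequence is reversed to keep in(0) at out(0), matching the Verilog above.
  out := Cat(in.reverse)
}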
Generate the Verilog code corresponding to the following Chisel files. File MulRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (ported from Verilog to Chisel by Andrew Waterman). Copyright 2019, 2020 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class MulFullRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val a = Input(new RawFloat(expWidth, sigWidth)) val b = Input(new RawFloat(expWidth, sigWidth)) val invalidExc = Output(Bool()) val rawOut = Output(new RawFloat(expWidth, sigWidth*2 - 1)) }) /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ val notSigNaN_invalidExc = (io.a.isInf && io.b.isZero) || (io.a.isZero && io.b.isInf) val notNaN_isInfOut = io.a.isInf || io.b.isInf val notNaN_isZeroOut = io.a.isZero || io.b.isZero val notNaN_signOut = io.a.sign ^ io.b.sign val common_sExpOut = io.a.sExp + io.b.sExp - (1<<expWidth).S val common_sigOut = (io.a.sig * io.b.sig)(sigWidth*2 - 1, 0) /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ io.invalidExc := isSigNaNRawFloat(io.a) || isSigNaNRawFloat(io.b) || notSigNaN_invalidExc io.rawOut.isInf := notNaN_isInfOut io.rawOut.isZero := notNaN_isZeroOut io.rawOut.sExp := common_sExpOut io.rawOut.isNaN := io.a.isNaN || io.b.isNaN io.rawOut.sign := notNaN_signOut io.rawOut.sig := common_sigOut } class MulRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val a = Input(new RawFloat(expWidth, sigWidth)) val b = Input(new RawFloat(expWidth, sigWidth)) val invalidExc = Output(Bool()) val rawOut = Output(new RawFloat(expWidth, sigWidth + 2)) }) val mulFullRaw = Module(new MulFullRawFN(expWidth, sigWidth)) mulFullRaw.io.a := io.a mulFullRaw.io.b := io.b io.invalidExc := mulFullRaw.io.invalidExc io.rawOut := mulFullRaw.io.rawOut io.rawOut.sig := { val sig = mulFullRaw.io.rawOut.sig Cat(sig >> (sigWidth - 2), sig(sigWidth - 3, 0).orR) } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class MulRecFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val a = Input(UInt((expWidth + sigWidth + 1).W)) val b = Input(UInt((expWidth + sigWidth + 1).W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(Bool()) val out = Output(UInt((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(UInt(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val mulRawFN = Module(new MulRawFN(expWidth, sigWidth)) mulRawFN.io.a := rawFloatFromRecFN(expWidth, sigWidth, io.a) mulRawFN.io.b := rawFloatFromRecFN(expWidth, sigWidth, io.b) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundRawFNToRecFN = Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0)) roundRawFNToRecFN.io.invalidExc := mulRawFN.io.invalidExc roundRawFNToRecFN.io.infiniteExc := false.B roundRawFNToRecFN.io.in := mulRawFN.io.rawOut roundRawFNToRecFN.io.roundingMode := io.roundingMode roundRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundRawFNToRecFN.io.out io.exceptionFlags := 
roundRawFNToRecFN.io.exceptionFlags }
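A hypothetical top level (not part of the files above) showing how MulRecFN is normally driven: IEEE-format operands are recoded into the recFN encoding, multiplied, and the result is decoded back. This assumes the recFNFromFN and fNFromRecFN converters from the same hardfloat package; the wrapper name MulFN32 is invented:

import chisel3._
import hardfloat._

class MulFN32 extends RawModule {
  val io = IO(new Bundle {
    val a              = Input(UInt(32.W))
    val b              = Input(UInt(32.W))
    val roundingMode   = Input(UInt(3.W))
    val detectTininess = Input(Bool())
    val out            = Output(UInt(32.W))
    val exceptionFlags = Output(UInt(5.W))
  })
  val mul = Module(new MulRecFN(8, 24))   // expWidth = 8, sigWidth = 24 (binary32)
  mul.io.a := recFNFromFN(8, 24, io.a)
  mul.io.b := recFNFromFN(8, 24, io.b)
  mul.io.roundingMode   := io.roundingMode
  mul.io.detectTininess := io.detectTininess
  io.out            := fNFromRecFN(8, 24, mul.io.out)
  io.exceptionFlags := mul.io.exceptionFlags
}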
module MulFullRawFN_48( // @[MulRecFN.scala:47:7] input io_a_isNaN, // @[MulRecFN.scala:49:16] input io_a_isInf, // @[MulRecFN.scala:49:16] input io_a_isZero, // @[MulRecFN.scala:49:16] input io_a_sign, // @[MulRecFN.scala:49:16] input [9:0] io_a_sExp, // @[MulRecFN.scala:49:16] input [24:0] io_a_sig, // @[MulRecFN.scala:49:16] input io_b_isNaN, // @[MulRecFN.scala:49:16] input io_b_isInf, // @[MulRecFN.scala:49:16] input io_b_isZero, // @[MulRecFN.scala:49:16] input io_b_sign, // @[MulRecFN.scala:49:16] input [9:0] io_b_sExp, // @[MulRecFN.scala:49:16] input [24:0] io_b_sig, // @[MulRecFN.scala:49:16] output io_invalidExc, // @[MulRecFN.scala:49:16] output io_rawOut_isNaN, // @[MulRecFN.scala:49:16] output io_rawOut_isInf, // @[MulRecFN.scala:49:16] output io_rawOut_isZero, // @[MulRecFN.scala:49:16] output io_rawOut_sign, // @[MulRecFN.scala:49:16] output [9:0] io_rawOut_sExp, // @[MulRecFN.scala:49:16] output [47:0] io_rawOut_sig // @[MulRecFN.scala:49:16] ); wire io_a_isNaN_0 = io_a_isNaN; // @[MulRecFN.scala:47:7] wire io_a_isInf_0 = io_a_isInf; // @[MulRecFN.scala:47:7] wire io_a_isZero_0 = io_a_isZero; // @[MulRecFN.scala:47:7] wire io_a_sign_0 = io_a_sign; // @[MulRecFN.scala:47:7] wire [9:0] io_a_sExp_0 = io_a_sExp; // @[MulRecFN.scala:47:7] wire [24:0] io_a_sig_0 = io_a_sig; // @[MulRecFN.scala:47:7] wire io_b_isNaN_0 = io_b_isNaN; // @[MulRecFN.scala:47:7] wire io_b_isInf_0 = io_b_isInf; // @[MulRecFN.scala:47:7] wire io_b_isZero_0 = io_b_isZero; // @[MulRecFN.scala:47:7] wire io_b_sign_0 = io_b_sign; // @[MulRecFN.scala:47:7] wire [9:0] io_b_sExp_0 = io_b_sExp; // @[MulRecFN.scala:47:7] wire [24:0] io_b_sig_0 = io_b_sig; // @[MulRecFN.scala:47:7] wire _io_invalidExc_T_7; // @[MulRecFN.scala:66:71] wire _io_rawOut_isNaN_T; // @[MulRecFN.scala:70:35] wire notNaN_isInfOut; // @[MulRecFN.scala:59:38] wire notNaN_isZeroOut; // @[MulRecFN.scala:60:40] wire notNaN_signOut; // @[MulRecFN.scala:61:36] wire [9:0] common_sExpOut; // @[MulRecFN.scala:62:48] wire [47:0] common_sigOut; // @[MulRecFN.scala:63:46] wire io_rawOut_isNaN_0; // @[MulRecFN.scala:47:7] wire io_rawOut_isInf_0; // @[MulRecFN.scala:47:7] wire io_rawOut_isZero_0; // @[MulRecFN.scala:47:7] wire io_rawOut_sign_0; // @[MulRecFN.scala:47:7] wire [9:0] io_rawOut_sExp_0; // @[MulRecFN.scala:47:7] wire [47:0] io_rawOut_sig_0; // @[MulRecFN.scala:47:7] wire io_invalidExc_0; // @[MulRecFN.scala:47:7] wire _notSigNaN_invalidExc_T = io_a_isInf_0 & io_b_isZero_0; // @[MulRecFN.scala:47:7, :58:44] wire _notSigNaN_invalidExc_T_1 = io_a_isZero_0 & io_b_isInf_0; // @[MulRecFN.scala:47:7, :58:76] wire notSigNaN_invalidExc = _notSigNaN_invalidExc_T | _notSigNaN_invalidExc_T_1; // @[MulRecFN.scala:58:{44,60,76}] assign notNaN_isInfOut = io_a_isInf_0 | io_b_isInf_0; // @[MulRecFN.scala:47:7, :59:38] assign io_rawOut_isInf_0 = notNaN_isInfOut; // @[MulRecFN.scala:47:7, :59:38] assign notNaN_isZeroOut = io_a_isZero_0 | io_b_isZero_0; // @[MulRecFN.scala:47:7, :60:40] assign io_rawOut_isZero_0 = notNaN_isZeroOut; // @[MulRecFN.scala:47:7, :60:40] assign notNaN_signOut = io_a_sign_0 ^ io_b_sign_0; // @[MulRecFN.scala:47:7, :61:36] assign io_rawOut_sign_0 = notNaN_signOut; // @[MulRecFN.scala:47:7, :61:36] wire [10:0] _common_sExpOut_T = {io_a_sExp_0[9], io_a_sExp_0} + {io_b_sExp_0[9], io_b_sExp_0}; // @[MulRecFN.scala:47:7, :62:36] wire [9:0] _common_sExpOut_T_1 = _common_sExpOut_T[9:0]; // @[MulRecFN.scala:62:36] wire [9:0] _common_sExpOut_T_2 = _common_sExpOut_T_1; // @[MulRecFN.scala:62:36] wire [10:0] _common_sExpOut_T_3 = 
{_common_sExpOut_T_2[9], _common_sExpOut_T_2} - 11'h100; // @[MulRecFN.scala:62:{36,48}] wire [9:0] _common_sExpOut_T_4 = _common_sExpOut_T_3[9:0]; // @[MulRecFN.scala:62:48] assign common_sExpOut = _common_sExpOut_T_4; // @[MulRecFN.scala:62:48] assign io_rawOut_sExp_0 = common_sExpOut; // @[MulRecFN.scala:47:7, :62:48] wire [49:0] _common_sigOut_T = {25'h0, io_a_sig_0} * {25'h0, io_b_sig_0}; // @[MulRecFN.scala:47:7, :63:35] assign common_sigOut = _common_sigOut_T[47:0]; // @[MulRecFN.scala:63:{35,46}] assign io_rawOut_sig_0 = common_sigOut; // @[MulRecFN.scala:47:7, :63:46] wire _io_invalidExc_T = io_a_sig_0[22]; // @[common.scala:82:56] wire _io_invalidExc_T_1 = ~_io_invalidExc_T; // @[common.scala:82:{49,56}] wire _io_invalidExc_T_2 = io_a_isNaN_0 & _io_invalidExc_T_1; // @[common.scala:82:{46,49}] wire _io_invalidExc_T_3 = io_b_sig_0[22]; // @[common.scala:82:56] wire _io_invalidExc_T_4 = ~_io_invalidExc_T_3; // @[common.scala:82:{49,56}] wire _io_invalidExc_T_5 = io_b_isNaN_0 & _io_invalidExc_T_4; // @[common.scala:82:{46,49}] wire _io_invalidExc_T_6 = _io_invalidExc_T_2 | _io_invalidExc_T_5; // @[common.scala:82:46] assign _io_invalidExc_T_7 = _io_invalidExc_T_6 | notSigNaN_invalidExc; // @[MulRecFN.scala:58:60, :66:{45,71}] assign io_invalidExc_0 = _io_invalidExc_T_7; // @[MulRecFN.scala:47:7, :66:71] assign _io_rawOut_isNaN_T = io_a_isNaN_0 | io_b_isNaN_0; // @[MulRecFN.scala:47:7, :70:35] assign io_rawOut_isNaN_0 = _io_rawOut_isNaN_T; // @[MulRecFN.scala:47:7, :70:35] assign io_invalidExc = io_invalidExc_0; // @[MulRecFN.scala:47:7] assign io_rawOut_isNaN = io_rawOut_isNaN_0; // @[MulRecFN.scala:47:7] assign io_rawOut_isInf = io_rawOut_isInf_0; // @[MulRecFN.scala:47:7] assign io_rawOut_isZero = io_rawOut_isZero_0; // @[MulRecFN.scala:47:7] assign io_rawOut_sign = io_rawOut_sign_0; // @[MulRecFN.scala:47:7] assign io_rawOut_sExp = io_rawOut_sExp_0; // @[MulRecFN.scala:47:7] assign io_rawOut_sig = io_rawOut_sig_0; // @[MulRecFN.scala:47:7] endmodule
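The 48-bit product significand emitted by the module above is wider than what the rounding stage accepts; MulRawFN (in MulRecFN.scala earlier) keeps the top sigWidth + 2 bits and ORs everything below them into a single sticky bit. A standalone restatement of that reduction with the widths spelled out (the helper name StickySig is invented here):

import chisel3._
import chisel3.util.Cat

object StickySig {
  // For sigWidth = 24 the full product significand is 48 bits, as in the Verilog
  // above; the result is sigWidth + 3 = 27 bits: the top 26 bits followed by one
  // sticky bit that ORs together bits (sigWidth - 3) downto 0.
  def apply(fullSig: UInt, sigWidth: Int): UInt =
    Cat(fullSig >> (sigWidth - 2), fullSig(sigWidth - 3, 0).orR)
}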
Generate the Verilog code corresponding to the following Chisel files. File RoundAnyRawFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util.Fill import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundAnyRawFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int, options: Int ) extends RawModule { override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(inExpWidth, inSigWidth)) // (allowed exponent range has limits) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0) val effectiveInSigWidth = if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1 val neverUnderflows = ((options & (flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact) ) != 0) || (inExpWidth < outExpWidth) val neverOverflows = ((options & flRoundOpt_neverOverflows) != 0) || (inExpWidth < outExpWidth) val outNaNExp = BigInt(7)<<(outExpWidth - 2) val outInfExp = BigInt(6)<<(outExpWidth - 2) val outMaxFiniteExp = outInfExp - 1 val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2 val outMinNonzeroExp = outMinNormExp - outSigWidth + 1 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundingMode_near_even = (io.roundingMode === round_near_even) val roundingMode_minMag = (io.roundingMode === round_minMag) val roundingMode_min = (io.roundingMode === round_min) val roundingMode_max = (io.roundingMode === round_max) val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag) val roundingMode_odd = (io.roundingMode === round_odd) val roundMagUp = (roundingMode_min && io.in.sign) || (roundingMode_max && ! 
io.in.sign) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sAdjustedExp = if (inExpWidth < outExpWidth) (io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S )(outExpWidth, 0).zext else if (inExpWidth == outExpWidth) io.in.sExp else io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S val adjustedSig = if (inSigWidth <= outSigWidth + 2) io.in.sig<<(outSigWidth - inSigWidth + 2) else (io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ## io.in.sig(inSigWidth - outSigWidth - 2, 0).orR ) val doShiftSigDown1 = if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2) val common_expOut = Wire(UInt((outExpWidth + 1).W)) val common_fractOut = Wire(UInt((outSigWidth - 1).W)) val common_overflow = Wire(Bool()) val common_totalUnderflow = Wire(Bool()) val common_underflow = Wire(Bool()) val common_inexact = Wire(Bool()) if ( neverOverflows && neverUnderflows && (effectiveInSigWidth <= outSigWidth) ) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1 common_fractOut := Mux(doShiftSigDown1, adjustedSig(outSigWidth + 1, 3), adjustedSig(outSigWidth, 2) ) common_overflow := false.B common_totalUnderflow := false.B common_underflow := false.B common_inexact := false.B } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundMask = if (neverUnderflows) 0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W) else (lowMask( sAdjustedExp(outExpWidth, 0), outMinNormExp - outSigWidth - 1, outMinNormExp ) | doShiftSigDown1) ## 3.U(2.W) val shiftedRoundMask = 0.U(1.W) ## roundMask>>1 val roundPosMask = ~shiftedRoundMask & roundMask val roundPosBit = (adjustedSig & roundPosMask).orR val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR val anyRound = roundPosBit || anyRoundExtra val roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && roundPosBit) || (roundMagUp && anyRound) val roundedSig: Bits = Mux(roundIncr, (((adjustedSig | roundMask)>>2) +& 1.U) & ~Mux(roundingMode_near_even && roundPosBit && ! anyRoundExtra, roundMask>>1, 0.U((outSigWidth + 2).W) ), (adjustedSig & ~roundMask)>>2 | Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U) ) //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? 
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext common_expOut := sRoundedExp(outExpWidth, 0) common_fractOut := Mux(doShiftSigDown1, roundedSig(outSigWidth - 1, 1), roundedSig(outSigWidth - 2, 0) ) common_overflow := (if (neverOverflows) false.B else //*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?: (sRoundedExp>>(outExpWidth - 1) >= 3.S)) common_totalUnderflow := (if (neverUnderflows) false.B else //*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?: (sRoundedExp < outMinNonzeroExp.S)) val unboundedRange_roundPosBit = Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1)) val unboundedRange_anyRound = (doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR val unboundedRange_roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && unboundedRange_roundPosBit) || (roundMagUp && unboundedRange_anyRound) val roundCarry = Mux(doShiftSigDown1, roundedSig(outSigWidth + 1), roundedSig(outSigWidth) ) common_underflow := (if (neverUnderflows) false.B else common_totalUnderflow || //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? (anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) && Mux(doShiftSigDown1, roundMask(3), roundMask(2)) && ! ((io.detectTininess === tininess_afterRounding) && ! Mux(doShiftSigDown1, roundMask(4), roundMask(3) ) && roundCarry && roundPosBit && unboundedRange_roundIncr))) common_inexact := common_totalUnderflow || anyRound } //------------------------------------------------------------------------ //------------------------------------------------------------------------ val isNaNOut = io.invalidExc || io.in.isNaN val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero val overflow = commonCase && common_overflow val underflow = commonCase && common_underflow val inexact = overflow || (commonCase && common_inexact) val overflow_roundMagUp = roundingMode_near_even || roundingMode_near_maxMag || roundMagUp val pegMinNonzeroMagOut = commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd) val pegMaxFiniteMagOut = overflow && ! 
overflow_roundMagUp val notNaN_isInfOut = notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp) val signOut = Mux(isNaNOut, false.B, io.in.sign) val expOut = (common_expOut & ~Mux(io.in.isZero || common_totalUnderflow, (BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMinNonzeroMagOut, ~outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMaxFiniteMagOut, (BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W), 0.U ) & ~Mux(notNaN_isInfOut, (BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U )) | Mux(pegMinNonzeroMagOut, outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) | Mux(pegMaxFiniteMagOut, outMaxFiniteExp.U((outExpWidth + 1).W), 0.U ) | Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) | Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U) val fractOut = Mux(isNaNOut || io.in.isZero || common_totalUnderflow, Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U), common_fractOut ) | Fill(outSigWidth - 1, pegMaxFiniteMagOut) io.out := signOut ## expOut ## fractOut io.exceptionFlags := io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int) extends RawModule { override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(expWidth, sigWidth + 2)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( expWidth, sigWidth + 2, expWidth, sigWidth, options)) roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc roundAnyRawFNToRecFN.io.in := io.in roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags }
module RoundRawFNToRecFN_e8_s24_114( // @[RoundAnyRawFNToRecFN.scala:295:5] input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:299:16] input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:299:16] input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:299:16] input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:299:16] input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:299:16] input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:299:16] input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:299:16] output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:299:16] output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:299:16] ); wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:295:5] wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:295:5] wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15] wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15] wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15] wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5] wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5] RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_114 roundAnyRawFNToRecFN ( // @[RoundAnyRawFNToRecFN.scala:310:15] .io_invalidExc (io_invalidExc_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_isNaN (io_in_isNaN_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_isInf (io_in_isInf_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_isZero (io_in_isZero_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_sign (io_in_sign_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_sExp (io_in_sExp_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_sig (io_in_sig_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_out (io_out_0), .io_exceptionFlags (io_exceptionFlags_0) ); // @[RoundAnyRawFNToRecFN.scala:310:15] assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5] assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5] endmodule
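In the RoundRawFNToRecFN_e8_s24_114 module above, io_roundingMode, io_detectTininess, and io_infiniteExc have been constant-propagated to 3'h0, 1'h1, and 1'h0 respectively. A minimal, hypothetical Chisel wrapper (not part of the hardfloat sources in this file) that would drive the rounder with those same constants is sketched below; it only illustrates the expected instantiation pattern.

package hardfloat

import chisel3._
import consts._

// Hypothetical sketch: wraps RoundRawFNToRecFN for a 33-bit recoded single
// (expWidth = 8, sigWidth = 24), tying off the inputs that appear as
// constants in RoundRawFNToRecFN_e8_s24_114 above.
class RoundRawToRecF32Sketch extends RawModule {
  val io = IO(new Bundle {
    val invalidExc     = Input(Bool())
    val in             = Input(new RawFloat(8, 26)) // sigWidth + 2, as the rounder expects
    val out            = Output(Bits((8 + 24 + 1).W))
    val exceptionFlags = Output(Bits(5.W))
  })

  val rounder = Module(new RoundRawFNToRecFN(expWidth = 8, sigWidth = 24, options = 0))
  rounder.io.invalidExc     := io.invalidExc
  rounder.io.infiniteExc    := false.B                // io_infiniteExc = 1'h0 above
  rounder.io.in             := io.in
  rounder.io.roundingMode   := round_near_even        // io_roundingMode = 3'h0 above
  rounder.io.detectTininess := tininess_afterRounding // io_detectTininess = 1'h1 above
  io.out            := rounder.io.out
  io.exceptionFlags := rounder.io.exceptionFlags
}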
Generate the Verilog code corresponding to the following Chisel files. File Transposer.scala: package gemmini import chisel3._ import chisel3.util._ import Util._ trait Transposer[T <: Data] extends Module { def dim: Int def dataType: T val io = IO(new Bundle { val inRow = Flipped(Decoupled(Vec(dim, dataType))) val outCol = Decoupled(Vec(dim, dataType)) }) } class PipelinedTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { require(isPow2(dim)) val regArray = Seq.fill(dim, dim)(Reg(dataType)) val regArrayT = regArray.transpose val sMoveUp :: sMoveLeft :: Nil = Enum(2) val state = RegInit(sMoveUp) val leftCounter = RegInit(0.U(log2Ceil(dim+1).W)) //(io.inRow.fire && state === sMoveLeft, dim+1) val upCounter = RegInit(0.U(log2Ceil(dim+1).W)) //Counter(io.inRow.fire && state === sMoveUp, dim+1) io.outCol.valid := 0.U io.inRow.ready := 0.U switch(state) { is(sMoveUp) { io.inRow.ready := upCounter <= dim.U io.outCol.valid := leftCounter > 0.U when(io.inRow.fire) { upCounter := upCounter + 1.U } when(upCounter === (dim-1).U) { state := sMoveLeft leftCounter := 0.U } when(io.outCol.fire) { leftCounter := leftCounter - 1.U } } is(sMoveLeft) { io.inRow.ready := leftCounter <= dim.U // TODO: this is naive io.outCol.valid := upCounter > 0.U when(leftCounter === (dim-1).U) { state := sMoveUp } when(io.inRow.fire) { leftCounter := leftCounter + 1.U upCounter := 0.U } when(io.outCol.fire) { upCounter := upCounter - 1.U } } } // Propagate input from bottom row to top row systolically in the move up phase // TODO: need to iterate over columns to connect Chisel values of type T // Should be able to operate directly on the Vec, but Seq and Vec don't mix (try Array?) for (colIdx <- 0 until dim) { regArray.foldRight(io.inRow.bits(colIdx)) { case (regRow, prevReg) => when (state === sMoveUp) { regRow(colIdx) := prevReg } regRow(colIdx) } } // Propagate input from right side to left side systolically in the move left phase for (rowIdx <- 0 until dim) { regArrayT.foldRight(io.inRow.bits(rowIdx)) { case (regCol, prevReg) => when (state === sMoveLeft) { regCol(rowIdx) := prevReg } regCol(rowIdx) } } // Pull from the left side or the top side based on the state for (idx <- 0 until dim) { when (state === sMoveUp) { io.outCol.bits(idx) := regArray(0)(idx) }.elsewhen(state === sMoveLeft) { io.outCol.bits(idx) := regArrayT(0)(idx) }.otherwise { io.outCol.bits(idx) := DontCare } } } class AlwaysOutTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { require(isPow2(dim)) val LEFT_DIR = 0.U(1.W) val UP_DIR = 1.U(1.W) class PE extends Module { val io = IO(new Bundle { val inR = Input(dataType) val inD = Input(dataType) val outL = Output(dataType) val outU = Output(dataType) val dir = Input(UInt(1.W)) val en = Input(Bool()) }) val reg = RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en) io.outU := reg io.outL := reg } val pes = Seq.fill(dim,dim)(Module(new PE)) val counter = RegInit(0.U((log2Ceil(dim) max 1).W)) // TODO replace this with a standard Chisel counter val dir = RegInit(LEFT_DIR) // Wire up horizontal signals for (row <- 0 until dim; col <- 0 until dim) { val right_in = if (col == dim-1) io.inRow.bits(row) else pes(row)(col+1).io.outL pes(row)(col).io.inR := right_in } // Wire up vertical signals for (row <- 0 until dim; col <- 0 until dim) { val down_in = if (row == dim-1) io.inRow.bits(col) else pes(row+1)(col).io.outU pes(row)(col).io.inD := down_in } // Wire up global signals pes.flatten.foreach(_.io.dir := dir) pes.flatten.foreach(_.io.en := 
io.inRow.fire) io.outCol.valid := true.B io.inRow.ready := true.B val left_out = VecInit(pes.transpose.head.map(_.io.outL)) val up_out = VecInit(pes.head.map(_.io.outU)) io.outCol.bits := Mux(dir === LEFT_DIR, left_out, up_out) when (io.inRow.fire) { counter := wrappingAdd(counter, 1.U, dim) } when (counter === (dim-1).U && io.inRow.fire) { dir := ~dir } } class NaiveTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { val regArray = Seq.fill(dim, dim)(Reg(dataType)) val regArrayT = regArray.transpose // state = 0 => filling regArray row-wise, state = 1 => draining regArray column-wise val state = RegInit(0.U(1.W)) val countInc = io.inRow.fire || io.outCol.fire val (countValue, countWrap) = Counter(countInc, dim) io.inRow.ready := state === 0.U io.outCol.valid := state === 1.U for (i <- 0 until dim) { for (j <- 0 until dim) { when(countValue === i.U && io.inRow.fire) { regArray(i)(j) := io.inRow.bits(j) } } } for (i <- 0 until dim) { io.outCol.bits(i) := 0.U for (j <- 0 until dim) { when(countValue === j.U) { io.outCol.bits(i) := regArrayT(j)(i) } } } when (io.inRow.fire && countWrap) { state := 1.U } when (io.outCol.fire && countWrap) { state := 0.U } assert(!(state === 0.U) || !io.outCol.fire) assert(!(state === 1.U) || !io.inRow.fire) }
module PE_165( // @[Transposer.scala:100:9]
  input clock, // @[Transposer.scala:100:9]
  input reset, // @[Transposer.scala:100:9]
  input [7:0] io_inR, // @[Transposer.scala:101:16]
  input [7:0] io_inD, // @[Transposer.scala:101:16]
  output [7:0] io_outL, // @[Transposer.scala:101:16]
  output [7:0] io_outU, // @[Transposer.scala:101:16]
  input io_dir, // @[Transposer.scala:101:16]
  input io_en // @[Transposer.scala:101:16]
);

  wire [7:0] io_inR_0 = io_inR; // @[Transposer.scala:100:9]
  wire [7:0] io_inD_0 = io_inD; // @[Transposer.scala:100:9]
  wire io_dir_0 = io_dir; // @[Transposer.scala:100:9]
  wire io_en_0 = io_en; // @[Transposer.scala:100:9]
  wire [7:0] io_outL_0; // @[Transposer.scala:100:9]
  wire [7:0] io_outU_0; // @[Transposer.scala:100:9]
  wire _reg_T = ~io_dir_0; // @[Transposer.scala:100:9, :110:36]
  wire [7:0] _reg_T_1 = _reg_T ? io_inR_0 : io_inD_0; // @[Transposer.scala:100:9, :110:{28,36}]
  reg [7:0] reg_0; // @[Transposer.scala:110:24]
  assign io_outL_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
  assign io_outU_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
  always @(posedge clock) begin // @[Transposer.scala:100:9]
    if (io_en_0) // @[Transposer.scala:100:9]
      reg_0 <= _reg_T_1; // @[Transposer.scala:110:{24,28}]
  end // always @(posedge)
  assign io_outL = io_outL_0; // @[Transposer.scala:100:9]
  assign io_outU = io_outU_0; // @[Transposer.scala:100:9]
endmodule
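The PE_165 module above is one processing element of the AlwaysOutTransposer array described in Transposer.scala. A short, hypothetical harness (not part of the gemmini sources) shows how that transposer is typically instantiated and wired row-in / column-out:

package gemmini

import chisel3._
import chisel3.util._

// Hypothetical sketch: a 4x4 transposer of 8-bit elements, matching the
// 8-bit io_inR/io_inD ports of PE_165 above.
class TransposerHarnessSketch extends Module {
  val dim = 4
  val io = IO(new Bundle {
    val rowIn  = Flipped(Decoupled(Vec(dim, UInt(8.W))))
    val colOut = Decoupled(Vec(dim, UInt(8.W)))
  })

  val transposer = Module(new AlwaysOutTransposer(dim, UInt(8.W)))

  // Rows are shifted into the PE array on every inRow fire; after dim fires
  // the internal direction register flips and transposed columns are
  // presented on outCol (which is always valid in this implementation).
  transposer.io.inRow <> io.rowIn
  io.colOut <> transposer.io.outCol
}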
Generate the Verilog code corresponding to the following Chisel files. File IdIndexer.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.amba.axi4 import chisel3._ import chisel3.util.{log2Ceil, Cat} import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp} import freechips.rocketchip.diplomacy.IdRange import freechips.rocketchip.util.{ControlKey, SimpleBundleField} case object AXI4ExtraId extends ControlKey[UInt]("extra_id") case class AXI4ExtraIdField(width: Int) extends SimpleBundleField(AXI4ExtraId)(Output(UInt(width.W)), 0.U) /** This adapter limits the set of FIFO domain ids used by outbound transactions. * * Extra AWID and ARID bits from upstream transactions are stored in a User Bits field called AXI4ExtraId, * which values are expected to be echoed back to this adapter alongside any downstream response messages, * and are then prepended to the RID and BID field to restore the original identifier. * * @param idBits is the desired number of A[W|R]ID bits to be used */ class AXI4IdIndexer(idBits: Int)(implicit p: Parameters) extends LazyModule { require (idBits >= 0, s"AXI4IdIndexer: idBits must be > 0, not $idBits") val node = AXI4AdapterNode( masterFn = { mp => // Create one new "master" per ID val masters = Array.tabulate(1 << idBits) { i => AXI4MasterParameters( name = "", id = IdRange(i, i+1), aligned = true, maxFlight = Some(0)) } // Accumulate the names of masters we squish val names = Array.fill(1 << idBits) { new scala.collection.mutable.HashSet[String]() } // Squash the information from original masters into new ID masters mp.masters.foreach { m => for (i <- m.id.start until m.id.end) { val j = i % (1 << idBits) val accumulated = masters(j) names(j) += m.name masters(j) = accumulated.copy( aligned = accumulated.aligned && m.aligned, maxFlight = accumulated.maxFlight.flatMap { o => m.maxFlight.map { n => o+n } }) } } val finalNameStrings = names.map { n => if (n.isEmpty) "(unused)" else n.toList.mkString(", ") } val bits = log2Ceil(mp.endId) - idBits val field = if (bits > 0) Seq(AXI4ExtraIdField(bits)) else Nil mp.copy( echoFields = field ++ mp.echoFields, masters = masters.zip(finalNameStrings).map { case (m, n) => m.copy(name = n) }) }, slaveFn = { sp => sp }) lazy val module = new Impl class Impl extends LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => // Leave everything mostly untouched Connectable.waiveUnmatched(out.ar, in.ar) match { case (lhs, rhs) => lhs.squeezeAll :<>= rhs.squeezeAll } Connectable.waiveUnmatched(out.aw, in.aw) match { case (lhs, rhs) => lhs.squeezeAll :<>= rhs.squeezeAll } Connectable.waiveUnmatched(out.w, in.w) match { case (lhs, rhs) => lhs.squeezeAll :<>= rhs.squeezeAll } Connectable.waiveUnmatched(in.b, out.b) match { case (lhs, rhs) => lhs.squeezeAll :<>= rhs.squeezeAll } Connectable.waiveUnmatched(in.r, out.r) match { case (lhs, rhs) => lhs.squeezeAll :<>= rhs.squeezeAll } val bits = log2Ceil(edgeIn.master.endId) - idBits if (bits > 0) { // (in.aX.bits.id >> idBits).width = bits > 0 out.ar.bits.echo(AXI4ExtraId) := in.ar.bits.id >> idBits out.aw.bits.echo(AXI4ExtraId) := in.aw.bits.id >> idBits // Special care is needed in case of 0 idBits, b/c .id has width 1 still if (idBits == 0) { out.ar.bits.id := 0.U out.aw.bits.id := 0.U in.r.bits.id := out.r.bits.echo(AXI4ExtraId) in.b.bits.id := out.b.bits.echo(AXI4ExtraId) } else { in.r.bits.id := Cat(out.r.bits.echo(AXI4ExtraId), out.r.bits.id) in.b.bits.id := 
Cat(out.b.bits.echo(AXI4ExtraId), out.b.bits.id) } } } } } object AXI4IdIndexer { def apply(idBits: Int)(implicit p: Parameters): AXI4Node = { val axi4index = LazyModule(new AXI4IdIndexer(idBits)) axi4index.node } } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. 
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. 
* * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. 
It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, 
DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. 
*/ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). 
[[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... // Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. 
*/ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. 
|$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. */ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. 
*/ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. */ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } }
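A brief, hypothetical usage sketch (not part of the sources above): AXI4IdIndexer is normally bound between an upstream AXI4 master node and a downstream slave node, and idBits = 1 produces exactly the port shape of AXI4IdIndexer_1 below (a 1-bit downstream A[W|R]ID plus a 7-bit extra_id echo field that is re-prepended to RID/BID on the way back).

package freechips.rocketchip.amba.axi4

import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}

// Hypothetical sketch: `upstream` and `downstream` are assumed to be AXI4
// endpoint nodes provided elsewhere in the design; only the binding pattern
// around AXI4IdIndexer is the point here.
class IdIndexerUsageSketch(upstream: AXI4MasterNode, downstream: AXI4SlaveNode)
                          (implicit p: Parameters) extends LazyModule {
  // Limit the downstream ID space to 2 IDs; the remaining 7 upstream ID bits
  // travel in the AXI4ExtraId echo field and are restored on the B and R channels.
  downstream := AXI4IdIndexer(idBits = 1) := upstream
  lazy val module = new LazyModuleImp(this)
}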
module AXI4IdIndexer_1( // @[IdIndexer.scala:63:9] input clock, // @[IdIndexer.scala:63:9] input reset, // @[IdIndexer.scala:63:9] output auto_in_aw_ready, // @[LazyModuleImp.scala:107:25] input auto_in_aw_valid, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_aw_bits_id, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_aw_bits_addr, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_aw_bits_len, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_aw_bits_size, // @[LazyModuleImp.scala:107:25] input [1:0] auto_in_aw_bits_burst, // @[LazyModuleImp.scala:107:25] input auto_in_aw_bits_lock, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_aw_bits_cache, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_aw_bits_prot, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_aw_bits_qos, // @[LazyModuleImp.scala:107:25] output auto_in_w_ready, // @[LazyModuleImp.scala:107:25] input auto_in_w_valid, // @[LazyModuleImp.scala:107:25] input [63:0] auto_in_w_bits_data, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_w_bits_strb, // @[LazyModuleImp.scala:107:25] input auto_in_w_bits_last, // @[LazyModuleImp.scala:107:25] input auto_in_b_ready, // @[LazyModuleImp.scala:107:25] output auto_in_b_valid, // @[LazyModuleImp.scala:107:25] output [7:0] auto_in_b_bits_id, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_b_bits_resp, // @[LazyModuleImp.scala:107:25] output auto_in_ar_ready, // @[LazyModuleImp.scala:107:25] input auto_in_ar_valid, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_ar_bits_id, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_ar_bits_addr, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_ar_bits_len, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_ar_bits_size, // @[LazyModuleImp.scala:107:25] input [1:0] auto_in_ar_bits_burst, // @[LazyModuleImp.scala:107:25] input auto_in_ar_bits_lock, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_ar_bits_cache, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_ar_bits_prot, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_ar_bits_qos, // @[LazyModuleImp.scala:107:25] input auto_in_r_ready, // @[LazyModuleImp.scala:107:25] output auto_in_r_valid, // @[LazyModuleImp.scala:107:25] output [7:0] auto_in_r_bits_id, // @[LazyModuleImp.scala:107:25] output [63:0] auto_in_r_bits_data, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_r_bits_resp, // @[LazyModuleImp.scala:107:25] output auto_in_r_bits_last, // @[LazyModuleImp.scala:107:25] input auto_out_aw_ready, // @[LazyModuleImp.scala:107:25] output auto_out_aw_valid, // @[LazyModuleImp.scala:107:25] output auto_out_aw_bits_id, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_aw_bits_addr, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_aw_bits_len, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_aw_bits_size, // @[LazyModuleImp.scala:107:25] output [1:0] auto_out_aw_bits_burst, // @[LazyModuleImp.scala:107:25] output auto_out_aw_bits_lock, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_aw_bits_cache, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_aw_bits_prot, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_aw_bits_qos, // @[LazyModuleImp.scala:107:25] output [6:0] auto_out_aw_bits_echo_extra_id, // @[LazyModuleImp.scala:107:25] input auto_out_w_ready, // @[LazyModuleImp.scala:107:25] output auto_out_w_valid, // @[LazyModuleImp.scala:107:25] output [63:0] auto_out_w_bits_data, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_w_bits_strb, // @[LazyModuleImp.scala:107:25] output 
auto_out_w_bits_last, // @[LazyModuleImp.scala:107:25] output auto_out_b_ready, // @[LazyModuleImp.scala:107:25] input auto_out_b_valid, // @[LazyModuleImp.scala:107:25] input auto_out_b_bits_id, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_b_bits_resp, // @[LazyModuleImp.scala:107:25] input [6:0] auto_out_b_bits_echo_extra_id, // @[LazyModuleImp.scala:107:25] input auto_out_ar_ready, // @[LazyModuleImp.scala:107:25] output auto_out_ar_valid, // @[LazyModuleImp.scala:107:25] output auto_out_ar_bits_id, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_ar_bits_addr, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_ar_bits_len, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_ar_bits_size, // @[LazyModuleImp.scala:107:25] output [1:0] auto_out_ar_bits_burst, // @[LazyModuleImp.scala:107:25] output auto_out_ar_bits_lock, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_ar_bits_cache, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_ar_bits_prot, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_ar_bits_qos, // @[LazyModuleImp.scala:107:25] output [6:0] auto_out_ar_bits_echo_extra_id, // @[LazyModuleImp.scala:107:25] output auto_out_r_ready, // @[LazyModuleImp.scala:107:25] input auto_out_r_valid, // @[LazyModuleImp.scala:107:25] input auto_out_r_bits_id, // @[LazyModuleImp.scala:107:25] input [63:0] auto_out_r_bits_data, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_r_bits_resp, // @[LazyModuleImp.scala:107:25] input [6:0] auto_out_r_bits_echo_extra_id, // @[LazyModuleImp.scala:107:25] input auto_out_r_bits_last // @[LazyModuleImp.scala:107:25] ); wire auto_in_aw_valid_0 = auto_in_aw_valid; // @[IdIndexer.scala:63:9] wire [7:0] auto_in_aw_bits_id_0 = auto_in_aw_bits_id; // @[IdIndexer.scala:63:9] wire [31:0] auto_in_aw_bits_addr_0 = auto_in_aw_bits_addr; // @[IdIndexer.scala:63:9] wire [7:0] auto_in_aw_bits_len_0 = auto_in_aw_bits_len; // @[IdIndexer.scala:63:9] wire [2:0] auto_in_aw_bits_size_0 = auto_in_aw_bits_size; // @[IdIndexer.scala:63:9] wire [1:0] auto_in_aw_bits_burst_0 = auto_in_aw_bits_burst; // @[IdIndexer.scala:63:9] wire auto_in_aw_bits_lock_0 = auto_in_aw_bits_lock; // @[IdIndexer.scala:63:9] wire [3:0] auto_in_aw_bits_cache_0 = auto_in_aw_bits_cache; // @[IdIndexer.scala:63:9] wire [2:0] auto_in_aw_bits_prot_0 = auto_in_aw_bits_prot; // @[IdIndexer.scala:63:9] wire [3:0] auto_in_aw_bits_qos_0 = auto_in_aw_bits_qos; // @[IdIndexer.scala:63:9] wire auto_in_w_valid_0 = auto_in_w_valid; // @[IdIndexer.scala:63:9] wire [63:0] auto_in_w_bits_data_0 = auto_in_w_bits_data; // @[IdIndexer.scala:63:9] wire [7:0] auto_in_w_bits_strb_0 = auto_in_w_bits_strb; // @[IdIndexer.scala:63:9] wire auto_in_w_bits_last_0 = auto_in_w_bits_last; // @[IdIndexer.scala:63:9] wire auto_in_b_ready_0 = auto_in_b_ready; // @[IdIndexer.scala:63:9] wire auto_in_ar_valid_0 = auto_in_ar_valid; // @[IdIndexer.scala:63:9] wire [7:0] auto_in_ar_bits_id_0 = auto_in_ar_bits_id; // @[IdIndexer.scala:63:9] wire [31:0] auto_in_ar_bits_addr_0 = auto_in_ar_bits_addr; // @[IdIndexer.scala:63:9] wire [7:0] auto_in_ar_bits_len_0 = auto_in_ar_bits_len; // @[IdIndexer.scala:63:9] wire [2:0] auto_in_ar_bits_size_0 = auto_in_ar_bits_size; // @[IdIndexer.scala:63:9] wire [1:0] auto_in_ar_bits_burst_0 = auto_in_ar_bits_burst; // @[IdIndexer.scala:63:9] wire auto_in_ar_bits_lock_0 = auto_in_ar_bits_lock; // @[IdIndexer.scala:63:9] wire [3:0] auto_in_ar_bits_cache_0 = auto_in_ar_bits_cache; // @[IdIndexer.scala:63:9] wire [2:0] auto_in_ar_bits_prot_0 = auto_in_ar_bits_prot; // 
@[IdIndexer.scala:63:9] wire [3:0] auto_in_ar_bits_qos_0 = auto_in_ar_bits_qos; // @[IdIndexer.scala:63:9] wire auto_in_r_ready_0 = auto_in_r_ready; // @[IdIndexer.scala:63:9] wire auto_out_aw_ready_0 = auto_out_aw_ready; // @[IdIndexer.scala:63:9] wire auto_out_w_ready_0 = auto_out_w_ready; // @[IdIndexer.scala:63:9] wire auto_out_b_valid_0 = auto_out_b_valid; // @[IdIndexer.scala:63:9] wire auto_out_b_bits_id_0 = auto_out_b_bits_id; // @[IdIndexer.scala:63:9] wire [1:0] auto_out_b_bits_resp_0 = auto_out_b_bits_resp; // @[IdIndexer.scala:63:9] wire [6:0] auto_out_b_bits_echo_extra_id_0 = auto_out_b_bits_echo_extra_id; // @[IdIndexer.scala:63:9] wire auto_out_ar_ready_0 = auto_out_ar_ready; // @[IdIndexer.scala:63:9] wire auto_out_r_valid_0 = auto_out_r_valid; // @[IdIndexer.scala:63:9] wire auto_out_r_bits_id_0 = auto_out_r_bits_id; // @[IdIndexer.scala:63:9] wire [63:0] auto_out_r_bits_data_0 = auto_out_r_bits_data; // @[IdIndexer.scala:63:9] wire [1:0] auto_out_r_bits_resp_0 = auto_out_r_bits_resp; // @[IdIndexer.scala:63:9] wire [6:0] auto_out_r_bits_echo_extra_id_0 = auto_out_r_bits_echo_extra_id; // @[IdIndexer.scala:63:9] wire auto_out_r_bits_last_0 = auto_out_r_bits_last; // @[IdIndexer.scala:63:9] wire nodeIn_aw_ready; // @[MixedNode.scala:551:17] wire nodeIn_aw_valid = auto_in_aw_valid_0; // @[IdIndexer.scala:63:9] wire [7:0] nodeIn_aw_bits_id = auto_in_aw_bits_id_0; // @[IdIndexer.scala:63:9] wire [31:0] nodeIn_aw_bits_addr = auto_in_aw_bits_addr_0; // @[IdIndexer.scala:63:9] wire [7:0] nodeIn_aw_bits_len = auto_in_aw_bits_len_0; // @[IdIndexer.scala:63:9] wire [2:0] nodeIn_aw_bits_size = auto_in_aw_bits_size_0; // @[IdIndexer.scala:63:9] wire [1:0] nodeIn_aw_bits_burst = auto_in_aw_bits_burst_0; // @[IdIndexer.scala:63:9] wire nodeIn_aw_bits_lock = auto_in_aw_bits_lock_0; // @[IdIndexer.scala:63:9] wire [3:0] nodeIn_aw_bits_cache = auto_in_aw_bits_cache_0; // @[IdIndexer.scala:63:9] wire [2:0] nodeIn_aw_bits_prot = auto_in_aw_bits_prot_0; // @[IdIndexer.scala:63:9] wire [3:0] nodeIn_aw_bits_qos = auto_in_aw_bits_qos_0; // @[IdIndexer.scala:63:9] wire nodeIn_w_ready; // @[MixedNode.scala:551:17] wire nodeIn_w_valid = auto_in_w_valid_0; // @[IdIndexer.scala:63:9] wire [63:0] nodeIn_w_bits_data = auto_in_w_bits_data_0; // @[IdIndexer.scala:63:9] wire [7:0] nodeIn_w_bits_strb = auto_in_w_bits_strb_0; // @[IdIndexer.scala:63:9] wire nodeIn_w_bits_last = auto_in_w_bits_last_0; // @[IdIndexer.scala:63:9] wire nodeIn_b_ready = auto_in_b_ready_0; // @[IdIndexer.scala:63:9] wire nodeIn_b_valid; // @[MixedNode.scala:551:17] wire [7:0] nodeIn_b_bits_id; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_b_bits_resp; // @[MixedNode.scala:551:17] wire nodeIn_ar_ready; // @[MixedNode.scala:551:17] wire nodeIn_ar_valid = auto_in_ar_valid_0; // @[IdIndexer.scala:63:9] wire [7:0] nodeIn_ar_bits_id = auto_in_ar_bits_id_0; // @[IdIndexer.scala:63:9] wire [31:0] nodeIn_ar_bits_addr = auto_in_ar_bits_addr_0; // @[IdIndexer.scala:63:9] wire [7:0] nodeIn_ar_bits_len = auto_in_ar_bits_len_0; // @[IdIndexer.scala:63:9] wire [2:0] nodeIn_ar_bits_size = auto_in_ar_bits_size_0; // @[IdIndexer.scala:63:9] wire [1:0] nodeIn_ar_bits_burst = auto_in_ar_bits_burst_0; // @[IdIndexer.scala:63:9] wire nodeIn_ar_bits_lock = auto_in_ar_bits_lock_0; // @[IdIndexer.scala:63:9] wire [3:0] nodeIn_ar_bits_cache = auto_in_ar_bits_cache_0; // @[IdIndexer.scala:63:9] wire [2:0] nodeIn_ar_bits_prot = auto_in_ar_bits_prot_0; // @[IdIndexer.scala:63:9] wire [3:0] nodeIn_ar_bits_qos = auto_in_ar_bits_qos_0; // 
@[IdIndexer.scala:63:9] wire nodeIn_r_ready = auto_in_r_ready_0; // @[IdIndexer.scala:63:9] wire nodeIn_r_valid; // @[MixedNode.scala:551:17] wire [7:0] nodeIn_r_bits_id; // @[MixedNode.scala:551:17] wire [63:0] nodeIn_r_bits_data; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_r_bits_resp; // @[MixedNode.scala:551:17] wire nodeIn_r_bits_last; // @[MixedNode.scala:551:17] wire nodeOut_aw_ready = auto_out_aw_ready_0; // @[IdIndexer.scala:63:9] wire nodeOut_aw_valid; // @[MixedNode.scala:542:17] wire nodeOut_aw_bits_id; // @[MixedNode.scala:542:17] wire [31:0] nodeOut_aw_bits_addr; // @[MixedNode.scala:542:17] wire [7:0] nodeOut_aw_bits_len; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_aw_bits_size; // @[MixedNode.scala:542:17] wire [1:0] nodeOut_aw_bits_burst; // @[MixedNode.scala:542:17] wire nodeOut_aw_bits_lock; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_aw_bits_cache; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_aw_bits_prot; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_aw_bits_qos; // @[MixedNode.scala:542:17] wire [6:0] nodeOut_aw_bits_echo_extra_id; // @[MixedNode.scala:542:17] wire nodeOut_w_ready = auto_out_w_ready_0; // @[IdIndexer.scala:63:9] wire nodeOut_w_valid; // @[MixedNode.scala:542:17] wire [63:0] nodeOut_w_bits_data; // @[MixedNode.scala:542:17] wire [7:0] nodeOut_w_bits_strb; // @[MixedNode.scala:542:17] wire nodeOut_w_bits_last; // @[MixedNode.scala:542:17] wire nodeOut_b_ready; // @[MixedNode.scala:542:17] wire nodeOut_b_valid = auto_out_b_valid_0; // @[IdIndexer.scala:63:9] wire nodeOut_b_bits_id = auto_out_b_bits_id_0; // @[IdIndexer.scala:63:9] wire [1:0] nodeOut_b_bits_resp = auto_out_b_bits_resp_0; // @[IdIndexer.scala:63:9] wire [6:0] nodeOut_b_bits_echo_extra_id = auto_out_b_bits_echo_extra_id_0; // @[IdIndexer.scala:63:9] wire nodeOut_ar_ready = auto_out_ar_ready_0; // @[IdIndexer.scala:63:9] wire nodeOut_ar_valid; // @[MixedNode.scala:542:17] wire nodeOut_ar_bits_id; // @[MixedNode.scala:542:17] wire [31:0] nodeOut_ar_bits_addr; // @[MixedNode.scala:542:17] wire [7:0] nodeOut_ar_bits_len; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_ar_bits_size; // @[MixedNode.scala:542:17] wire [1:0] nodeOut_ar_bits_burst; // @[MixedNode.scala:542:17] wire nodeOut_ar_bits_lock; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_ar_bits_cache; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_ar_bits_prot; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_ar_bits_qos; // @[MixedNode.scala:542:17] wire [6:0] nodeOut_ar_bits_echo_extra_id; // @[MixedNode.scala:542:17] wire nodeOut_r_ready; // @[MixedNode.scala:542:17] wire nodeOut_r_valid = auto_out_r_valid_0; // @[IdIndexer.scala:63:9] wire nodeOut_r_bits_id = auto_out_r_bits_id_0; // @[IdIndexer.scala:63:9] wire [63:0] nodeOut_r_bits_data = auto_out_r_bits_data_0; // @[IdIndexer.scala:63:9] wire [1:0] nodeOut_r_bits_resp = auto_out_r_bits_resp_0; // @[IdIndexer.scala:63:9] wire [6:0] nodeOut_r_bits_echo_extra_id = auto_out_r_bits_echo_extra_id_0; // @[IdIndexer.scala:63:9] wire nodeOut_r_bits_last = auto_out_r_bits_last_0; // @[IdIndexer.scala:63:9] wire auto_in_aw_ready_0; // @[IdIndexer.scala:63:9] wire auto_in_w_ready_0; // @[IdIndexer.scala:63:9] wire [7:0] auto_in_b_bits_id_0; // @[IdIndexer.scala:63:9] wire [1:0] auto_in_b_bits_resp_0; // @[IdIndexer.scala:63:9] wire auto_in_b_valid_0; // @[IdIndexer.scala:63:9] wire auto_in_ar_ready_0; // @[IdIndexer.scala:63:9] wire [7:0] auto_in_r_bits_id_0; // @[IdIndexer.scala:63:9] wire [63:0] auto_in_r_bits_data_0; // @[IdIndexer.scala:63:9] wire [1:0] 
auto_in_r_bits_resp_0; // @[IdIndexer.scala:63:9] wire auto_in_r_bits_last_0; // @[IdIndexer.scala:63:9] wire auto_in_r_valid_0; // @[IdIndexer.scala:63:9] wire [6:0] auto_out_aw_bits_echo_extra_id_0; // @[IdIndexer.scala:63:9] wire auto_out_aw_bits_id_0; // @[IdIndexer.scala:63:9] wire [31:0] auto_out_aw_bits_addr_0; // @[IdIndexer.scala:63:9] wire [7:0] auto_out_aw_bits_len_0; // @[IdIndexer.scala:63:9] wire [2:0] auto_out_aw_bits_size_0; // @[IdIndexer.scala:63:9] wire [1:0] auto_out_aw_bits_burst_0; // @[IdIndexer.scala:63:9] wire auto_out_aw_bits_lock_0; // @[IdIndexer.scala:63:9] wire [3:0] auto_out_aw_bits_cache_0; // @[IdIndexer.scala:63:9] wire [2:0] auto_out_aw_bits_prot_0; // @[IdIndexer.scala:63:9] wire [3:0] auto_out_aw_bits_qos_0; // @[IdIndexer.scala:63:9] wire auto_out_aw_valid_0; // @[IdIndexer.scala:63:9] wire [63:0] auto_out_w_bits_data_0; // @[IdIndexer.scala:63:9] wire [7:0] auto_out_w_bits_strb_0; // @[IdIndexer.scala:63:9] wire auto_out_w_bits_last_0; // @[IdIndexer.scala:63:9] wire auto_out_w_valid_0; // @[IdIndexer.scala:63:9] wire auto_out_b_ready_0; // @[IdIndexer.scala:63:9] wire [6:0] auto_out_ar_bits_echo_extra_id_0; // @[IdIndexer.scala:63:9] wire auto_out_ar_bits_id_0; // @[IdIndexer.scala:63:9] wire [31:0] auto_out_ar_bits_addr_0; // @[IdIndexer.scala:63:9] wire [7:0] auto_out_ar_bits_len_0; // @[IdIndexer.scala:63:9] wire [2:0] auto_out_ar_bits_size_0; // @[IdIndexer.scala:63:9] wire [1:0] auto_out_ar_bits_burst_0; // @[IdIndexer.scala:63:9] wire auto_out_ar_bits_lock_0; // @[IdIndexer.scala:63:9] wire [3:0] auto_out_ar_bits_cache_0; // @[IdIndexer.scala:63:9] wire [2:0] auto_out_ar_bits_prot_0; // @[IdIndexer.scala:63:9] wire [3:0] auto_out_ar_bits_qos_0; // @[IdIndexer.scala:63:9] wire auto_out_ar_valid_0; // @[IdIndexer.scala:63:9] wire auto_out_r_ready_0; // @[IdIndexer.scala:63:9] assign auto_in_aw_ready_0 = nodeIn_aw_ready; // @[IdIndexer.scala:63:9] assign nodeOut_aw_valid = nodeIn_aw_valid; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_addr = nodeIn_aw_bits_addr; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_len = nodeIn_aw_bits_len; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_size = nodeIn_aw_bits_size; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_burst = nodeIn_aw_bits_burst; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_lock = nodeIn_aw_bits_lock; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_cache = nodeIn_aw_bits_cache; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_prot = nodeIn_aw_bits_prot; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_qos = nodeIn_aw_bits_qos; // @[MixedNode.scala:542:17, :551:17] assign auto_in_w_ready_0 = nodeIn_w_ready; // @[IdIndexer.scala:63:9] assign nodeOut_w_valid = nodeIn_w_valid; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_w_bits_data = nodeIn_w_bits_data; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_w_bits_strb = nodeIn_w_bits_strb; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_w_bits_last = nodeIn_w_bits_last; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_b_ready = nodeIn_b_ready; // @[MixedNode.scala:542:17, :551:17] assign auto_in_b_valid_0 = nodeIn_b_valid; // @[IdIndexer.scala:63:9] wire [7:0] _nodeIn_b_bits_id_T; // @[IdIndexer.scala:97:30] assign auto_in_b_bits_id_0 = nodeIn_b_bits_id; // @[IdIndexer.scala:63:9] assign auto_in_b_bits_resp_0 = nodeIn_b_bits_resp; // @[IdIndexer.scala:63:9] assign auto_in_ar_ready_0 = nodeIn_ar_ready; // 
@[IdIndexer.scala:63:9] assign nodeOut_ar_valid = nodeIn_ar_valid; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_addr = nodeIn_ar_bits_addr; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_len = nodeIn_ar_bits_len; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_size = nodeIn_ar_bits_size; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_burst = nodeIn_ar_bits_burst; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_lock = nodeIn_ar_bits_lock; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_cache = nodeIn_ar_bits_cache; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_prot = nodeIn_ar_bits_prot; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_qos = nodeIn_ar_bits_qos; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_r_ready = nodeIn_r_ready; // @[MixedNode.scala:542:17, :551:17] assign auto_in_r_valid_0 = nodeIn_r_valid; // @[IdIndexer.scala:63:9] wire [7:0] _nodeIn_r_bits_id_T; // @[IdIndexer.scala:96:30] assign auto_in_r_bits_id_0 = nodeIn_r_bits_id; // @[IdIndexer.scala:63:9] assign auto_in_r_bits_data_0 = nodeIn_r_bits_data; // @[IdIndexer.scala:63:9] assign auto_in_r_bits_resp_0 = nodeIn_r_bits_resp; // @[IdIndexer.scala:63:9] assign auto_in_r_bits_last_0 = nodeIn_r_bits_last; // @[IdIndexer.scala:63:9] assign nodeIn_aw_ready = nodeOut_aw_ready; // @[MixedNode.scala:542:17, :551:17] assign auto_out_aw_valid_0 = nodeOut_aw_valid; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_id_0 = nodeOut_aw_bits_id; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_addr_0 = nodeOut_aw_bits_addr; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_len_0 = nodeOut_aw_bits_len; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_size_0 = nodeOut_aw_bits_size; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_burst_0 = nodeOut_aw_bits_burst; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_lock_0 = nodeOut_aw_bits_lock; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_cache_0 = nodeOut_aw_bits_cache; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_prot_0 = nodeOut_aw_bits_prot; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_qos_0 = nodeOut_aw_bits_qos; // @[IdIndexer.scala:63:9] wire [6:0] _nodeOut_aw_bits_echo_extra_id_T; // @[IdIndexer.scala:88:56] assign auto_out_aw_bits_echo_extra_id_0 = nodeOut_aw_bits_echo_extra_id; // @[IdIndexer.scala:63:9] assign nodeIn_w_ready = nodeOut_w_ready; // @[MixedNode.scala:542:17, :551:17] assign auto_out_w_valid_0 = nodeOut_w_valid; // @[IdIndexer.scala:63:9] assign auto_out_w_bits_data_0 = nodeOut_w_bits_data; // @[IdIndexer.scala:63:9] assign auto_out_w_bits_strb_0 = nodeOut_w_bits_strb; // @[IdIndexer.scala:63:9] assign auto_out_w_bits_last_0 = nodeOut_w_bits_last; // @[IdIndexer.scala:63:9] assign auto_out_b_ready_0 = nodeOut_b_ready; // @[IdIndexer.scala:63:9] assign nodeIn_b_valid = nodeOut_b_valid; // @[MixedNode.scala:542:17, :551:17] assign nodeIn_b_bits_resp = nodeOut_b_bits_resp; // @[MixedNode.scala:542:17, :551:17] assign nodeIn_ar_ready = nodeOut_ar_ready; // @[MixedNode.scala:542:17, :551:17] assign auto_out_ar_valid_0 = nodeOut_ar_valid; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_id_0 = nodeOut_ar_bits_id; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_addr_0 = nodeOut_ar_bits_addr; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_len_0 = nodeOut_ar_bits_len; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_size_0 = nodeOut_ar_bits_size; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_burst_0 = 
nodeOut_ar_bits_burst; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_lock_0 = nodeOut_ar_bits_lock; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_cache_0 = nodeOut_ar_bits_cache; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_prot_0 = nodeOut_ar_bits_prot; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_qos_0 = nodeOut_ar_bits_qos; // @[IdIndexer.scala:63:9] wire [6:0] _nodeOut_ar_bits_echo_extra_id_T; // @[IdIndexer.scala:87:56] assign auto_out_ar_bits_echo_extra_id_0 = nodeOut_ar_bits_echo_extra_id; // @[IdIndexer.scala:63:9] assign auto_out_r_ready_0 = nodeOut_r_ready; // @[IdIndexer.scala:63:9] assign nodeIn_r_valid = nodeOut_r_valid; // @[MixedNode.scala:542:17, :551:17] assign nodeIn_r_bits_data = nodeOut_r_bits_data; // @[MixedNode.scala:542:17, :551:17] assign nodeIn_r_bits_resp = nodeOut_r_bits_resp; // @[MixedNode.scala:542:17, :551:17] assign nodeIn_r_bits_last = nodeOut_r_bits_last; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_id = nodeIn_ar_bits_id[0]; // @[IdIndexer.scala:69:43] assign nodeOut_aw_bits_id = nodeIn_aw_bits_id[0]; // @[IdIndexer.scala:72:43] assign _nodeOut_ar_bits_echo_extra_id_T = nodeIn_ar_bits_id[7:1]; // @[IdIndexer.scala:87:56] assign nodeOut_ar_bits_echo_extra_id = _nodeOut_ar_bits_echo_extra_id_T; // @[IdIndexer.scala:87:56] assign _nodeOut_aw_bits_echo_extra_id_T = nodeIn_aw_bits_id[7:1]; // @[IdIndexer.scala:88:56] assign nodeOut_aw_bits_echo_extra_id = _nodeOut_aw_bits_echo_extra_id_T; // @[IdIndexer.scala:88:56] assign _nodeIn_r_bits_id_T = {nodeOut_r_bits_echo_extra_id, nodeOut_r_bits_id}; // @[IdIndexer.scala:96:30] assign nodeIn_r_bits_id = _nodeIn_r_bits_id_T; // @[IdIndexer.scala:96:30] assign _nodeIn_b_bits_id_T = {nodeOut_b_bits_echo_extra_id, nodeOut_b_bits_id}; // @[IdIndexer.scala:97:30] assign nodeIn_b_bits_id = _nodeIn_b_bits_id_T; // @[IdIndexer.scala:97:30] assign auto_in_aw_ready = auto_in_aw_ready_0; // @[IdIndexer.scala:63:9] assign auto_in_w_ready = auto_in_w_ready_0; // @[IdIndexer.scala:63:9] assign auto_in_b_valid = auto_in_b_valid_0; // @[IdIndexer.scala:63:9] assign auto_in_b_bits_id = auto_in_b_bits_id_0; // @[IdIndexer.scala:63:9] assign auto_in_b_bits_resp = auto_in_b_bits_resp_0; // @[IdIndexer.scala:63:9] assign auto_in_ar_ready = auto_in_ar_ready_0; // @[IdIndexer.scala:63:9] assign auto_in_r_valid = auto_in_r_valid_0; // @[IdIndexer.scala:63:9] assign auto_in_r_bits_id = auto_in_r_bits_id_0; // @[IdIndexer.scala:63:9] assign auto_in_r_bits_data = auto_in_r_bits_data_0; // @[IdIndexer.scala:63:9] assign auto_in_r_bits_resp = auto_in_r_bits_resp_0; // @[IdIndexer.scala:63:9] assign auto_in_r_bits_last = auto_in_r_bits_last_0; // @[IdIndexer.scala:63:9] assign auto_out_aw_valid = auto_out_aw_valid_0; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_id = auto_out_aw_bits_id_0; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_addr = auto_out_aw_bits_addr_0; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_len = auto_out_aw_bits_len_0; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_size = auto_out_aw_bits_size_0; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_burst = auto_out_aw_bits_burst_0; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_lock = auto_out_aw_bits_lock_0; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_cache = auto_out_aw_bits_cache_0; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_prot = auto_out_aw_bits_prot_0; // @[IdIndexer.scala:63:9] assign auto_out_aw_bits_qos = auto_out_aw_bits_qos_0; // @[IdIndexer.scala:63:9] assign 
auto_out_aw_bits_echo_extra_id = auto_out_aw_bits_echo_extra_id_0; // @[IdIndexer.scala:63:9] assign auto_out_w_valid = auto_out_w_valid_0; // @[IdIndexer.scala:63:9] assign auto_out_w_bits_data = auto_out_w_bits_data_0; // @[IdIndexer.scala:63:9] assign auto_out_w_bits_strb = auto_out_w_bits_strb_0; // @[IdIndexer.scala:63:9] assign auto_out_w_bits_last = auto_out_w_bits_last_0; // @[IdIndexer.scala:63:9] assign auto_out_b_ready = auto_out_b_ready_0; // @[IdIndexer.scala:63:9] assign auto_out_ar_valid = auto_out_ar_valid_0; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_id = auto_out_ar_bits_id_0; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_addr = auto_out_ar_bits_addr_0; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_len = auto_out_ar_bits_len_0; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_size = auto_out_ar_bits_size_0; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_burst = auto_out_ar_bits_burst_0; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_lock = auto_out_ar_bits_lock_0; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_cache = auto_out_ar_bits_cache_0; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_prot = auto_out_ar_bits_prot_0; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_qos = auto_out_ar_bits_qos_0; // @[IdIndexer.scala:63:9] assign auto_out_ar_bits_echo_extra_id = auto_out_ar_bits_echo_extra_id_0; // @[IdIndexer.scala:63:9] assign auto_out_r_ready = auto_out_r_ready_0; // @[IdIndexer.scala:63:9] endmodule
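The @[IdIndexer.scala] annotations above point at an AXI4 ID indexer: the module forwards only id[0] to the slave side and rides id[7:1] on the echo_extra_id field, rebuilding the wide ID on the B/R responses. As an illustrative aside, here is a minimal Chisel sketch of just that split/rejoin step, written from the assignments visible above; it is not the rocket-chip AXI4IdIndexer source, which this document does not include.

// Illustrative sketch only, not the rocket-chip AXI4IdIndexer source:
// forward id[0] to the slave, carry id[7:1] via the echo field, and
// reattach the echoed bits on the response path, as the assigns above do.
import chisel3._
import chisel3.util._

class IdSplitSketch extends Module {
  val io = IO(new Bundle {
    val req_id        = Input(UInt(8.W))   // wide master-side ID (AR/AW)
    val narrow_id     = Output(UInt(1.W))  // ID presented to the slave
    val echo_extra_id = Output(UInt(7.W))  // upper bits carried via the echo field
    val resp_id       = Input(UInt(1.W))   // narrow ID returning on B/R
    val resp_extra_id = Input(UInt(7.W))   // echoed bits returning on B/R
    val resp_wide_id  = Output(UInt(8.W))  // reconstructed master-side ID
  })
  io.narrow_id     := io.req_id(0)                       // cf. nodeOut_ar_bits_id = nodeIn_ar_bits_id[0]
  io.echo_extra_id := io.req_id(7, 1)                    // cf. echo_extra_id = id[7:1]
  io.resp_wide_id  := Cat(io.resp_extra_id, io.resp_id)  // cf. nodeIn_b_bits_id = {echo_extra_id, b_bits_id}
}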
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module ResetSynchronizerShiftReg_w1_d3_i0( // @[SynchronizerReg.scala:142:7]
  input  clock, // @[SynchronizerReg.scala:142:7]
  input  reset, // @[SynchronizerReg.scala:142:7]
  input  io_d,  // @[ShiftReg.scala:36:14]
  output io_q   // @[ShiftReg.scala:36:14]
);

  InferredResetSynchronizerPrimitiveShiftReg_d3_i0 output_chain ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (reset),
    .io_d  (io_d),
    .io_q  (io_q)
  ); // @[ShiftReg.scala:45:23]
endmodule
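For reference, a minimal usage sketch showing how an instance like the module above is typically produced through the ResetSynchronizerShiftReg companion object defined in SynchronizerReg.scala; the wrapper class and signal names here are assumptions for illustration.

// Minimal usage sketch, assuming the ResetSynchronizerShiftReg companion object
// from SynchronizerReg.scala above; with w = 1, sync = 3, init = 0 the wrapper's
// desiredName is "ResetSynchronizerShiftReg_w1_d3_i0", matching the module printed here.
import chisel3._
import freechips.rocketchip.util.ResetSynchronizerShiftReg

class SyncExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  io.q := ResetSynchronizerShiftReg(io.d, sync = 3, init = 0, name = Some("sync_chain"))
}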
Generate the Verilog code corresponding to the following Chisel files.

File Compactor.scala:

package saturn.common

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util._
import freechips.rocketchip.tile._

class CompactorReq(n: Int) extends Bundle {
  val head = UInt(log2Ceil(n).W)
  val tail = UInt(log2Ceil(n).W)
  def count = Mux(tail === 0.U, n.U, tail) - head
}

class Compactor[T <: Data](pushN: Int, popN: Int, gen: => T, forward: Boolean) extends Module {
  require (pushN >= popN)
  val io = IO(new Bundle {
    val push = Flipped(Decoupled(new CompactorReq(pushN)))
    val push_data = Input(Vec(pushN, gen))
    val pop = Flipped(Decoupled(new CompactorReq(popN)))
    val pop_data = Output(Vec(popN, gen))
  })

  val (push, push_data) = if (forward) {
    (io.push, io.push_data)
  } else {
    val push_q = Module(new Queue(new CompactorReq(pushN) {
      val data = Vec(pushN, gen)
    }, 2))
    push_q.io.enq.valid := io.push.valid
    push_q.io.enq.bits.head := io.push.bits.head
    push_q.io.enq.bits.tail := io.push.bits.tail
    push_q.io.enq.bits.data := io.push_data
    io.push.ready := push_q.io.enq.ready
    (push_q.io.deq, push_q.io.deq.bits.data)
  }

  def wshr(in: Seq[T], shamt: UInt): Seq[T] = (0 until in.size).map { i => VecInit(in.drop(i))(shamt) }
  def wshl(in: Seq[T], shamt: UInt): Seq[T] = wshr(in.reverse, shamt).reverse

  val count = RegInit(0.U((1+log2Ceil(pushN)).W))
  val regs = Seq.fill(pushN) { Reg(gen) }
  val valid = (1.U << count) - 1.U

  push.ready := pushN.U +& Mux(io.pop.valid, io.pop.bits.count, 0.U) >= count +& push.bits.count
  io.pop.ready := count +& Mux(push.valid, push.bits.count, 0.U) >= io.pop.bits.count

  val regs_shr = wshr(regs, io.pop.bits.count)
  val valid_shr = valid >> io.pop.bits.count

  when (push.fire || io.pop.fire) {
    count := count +& Mux(push.fire, push.bits.count, 0.U) - Mux(io.pop.fire, io.pop.bits.count, 0.U)
  }

  val push_elems = push_data
  val push_shr = wshr((Seq.fill(pushN)(0.U.asTypeOf(gen)) ++ push_elems), pushN.U +& push.bits.head - count)
  val push_shr_pop = wshr((Seq.fill(pushN)(0.U.asTypeOf(gen)) ++ push_elems), pushN.U +& push.bits.head +& io.pop.bits.count - count)

  when (io.pop.fire) {
    for (i <- 0 until pushN) regs(i) := Mux(valid_shr(i), regs_shr(i), push_shr_pop(i))
  } .elsewhen (push.fire) {
    for (i <- 0 until pushN) when (!valid(i)) { regs(i) := push_shr(i) }
  }

  val out_data = (0 until popN).map { i => Mux(valid(i), regs(i), push_shr(i)) }
  io.pop_data := VecInit(wshl(out_data, io.pop.bits.head).take(popN))
}
module Compactor_2( // @[Compactor.scala:16:7] input clock, // @[Compactor.scala:16:7] input reset, // @[Compactor.scala:16:7] output io_push_ready, // @[Compactor.scala:18:14] input io_push_valid, // @[Compactor.scala:18:14] input [3:0] io_push_bits_tail, // @[Compactor.scala:18:14] input io_push_data_0, // @[Compactor.scala:18:14] input io_push_data_1, // @[Compactor.scala:18:14] input io_push_data_2, // @[Compactor.scala:18:14] input io_push_data_3, // @[Compactor.scala:18:14] input io_push_data_4, // @[Compactor.scala:18:14] input io_push_data_5, // @[Compactor.scala:18:14] input io_push_data_6, // @[Compactor.scala:18:14] input io_push_data_7, // @[Compactor.scala:18:14] input io_push_data_8, // @[Compactor.scala:18:14] input io_push_data_9, // @[Compactor.scala:18:14] input io_push_data_10, // @[Compactor.scala:18:14] input io_push_data_11, // @[Compactor.scala:18:14] input io_push_data_12, // @[Compactor.scala:18:14] input io_push_data_13, // @[Compactor.scala:18:14] input io_push_data_14, // @[Compactor.scala:18:14] input io_push_data_15, // @[Compactor.scala:18:14] output io_pop_ready, // @[Compactor.scala:18:14] input io_pop_valid, // @[Compactor.scala:18:14] output io_pop_data_0 // @[Compactor.scala:18:14] ); wire _push_q_io_deq_valid; // @[Compactor.scala:29:24] wire [3:0] _push_q_io_deq_bits_head; // @[Compactor.scala:29:24] wire [3:0] _push_q_io_deq_bits_tail; // @[Compactor.scala:29:24] wire _push_q_io_deq_bits_data_0; // @[Compactor.scala:29:24] wire _push_q_io_deq_bits_data_1; // @[Compactor.scala:29:24] wire _push_q_io_deq_bits_data_2; // @[Compactor.scala:29:24] wire _push_q_io_deq_bits_data_3; // @[Compactor.scala:29:24] wire _push_q_io_deq_bits_data_4; // @[Compactor.scala:29:24] wire _push_q_io_deq_bits_data_5; // @[Compactor.scala:29:24] wire _push_q_io_deq_bits_data_6; // @[Compactor.scala:29:24] wire _push_q_io_deq_bits_data_7; // @[Compactor.scala:29:24] wire _push_q_io_deq_bits_data_8; // @[Compactor.scala:29:24] wire _push_q_io_deq_bits_data_9; // @[Compactor.scala:29:24] wire _push_q_io_deq_bits_data_10; // @[Compactor.scala:29:24] wire _push_q_io_deq_bits_data_11; // @[Compactor.scala:29:24] wire _push_q_io_deq_bits_data_12; // @[Compactor.scala:29:24] wire _push_q_io_deq_bits_data_13; // @[Compactor.scala:29:24] wire _push_q_io_deq_bits_data_14; // @[Compactor.scala:29:24] wire _push_q_io_deq_bits_data_15; // @[Compactor.scala:29:24] reg [4:0] count; // @[Compactor.scala:45:22] reg regs_0; // @[Compactor.scala:46:35] reg regs_1; // @[Compactor.scala:46:35] reg regs_2; // @[Compactor.scala:46:35] reg regs_3; // @[Compactor.scala:46:35] reg regs_4; // @[Compactor.scala:46:35] reg regs_5; // @[Compactor.scala:46:35] reg regs_6; // @[Compactor.scala:46:35] reg regs_7; // @[Compactor.scala:46:35] reg regs_8; // @[Compactor.scala:46:35] reg regs_9; // @[Compactor.scala:46:35] reg regs_10; // @[Compactor.scala:46:35] reg regs_11; // @[Compactor.scala:46:35] reg regs_12; // @[Compactor.scala:46:35] reg regs_13; // @[Compactor.scala:46:35] reg regs_14; // @[Compactor.scala:46:35] reg regs_15; // @[Compactor.scala:46:35] wire [31:0] _valid_T = 32'h1 << count; // @[Compactor.scala:45:22, :47:20] wire [16:0] _valid_T_1 = _valid_T[16:0] - 17'h1; // @[Compactor.scala:47:{20,30}] wire _count_T_1 = _push_q_io_deq_bits_tail == 4'h0; // @[Compactor.scala:13:24, :29:24] wire [4:0] _GEN = {1'h0, _push_q_io_deq_bits_tail}; // @[Compactor.scala:13:18, :16:7, :29:24] wire [4:0] _GEN_0 = {1'h0, _push_q_io_deq_bits_head}; // @[Compactor.scala:13:44, :16:7, :29:24] wire [5:0] _GEN_1 = 
{1'h0, count}; // @[Compactor.scala:16:7, :45:22, :49:79] wire push_q_io_deq_ready = {5'h0, io_pop_valid} + 6'h10 >= _GEN_1 + {1'h0, (_count_T_1 ? 5'h10 : _GEN) - _GEN_0}; // @[Compactor.scala:13:{18,24,44}, :16:7, :49:{25,70,79}] wire [5:0] _io_pop_ready_T_5 = _GEN_1 + {1'h0, _push_q_io_deq_valid ? (_count_T_1 ? 5'h10 : _GEN) - _GEN_0 : 5'h0}; // @[Compactor.scala:13:{18,24,44}, :16:7, :29:24, :49:79, :50:{25,31}] wire [4:0] _push_shr_T_1 = _GEN_0 - 5'h10 - count; // @[Compactor.scala:13:44, :45:22, :60:{87,105}] wire [31:0] _GEN_2 = {{_push_q_io_deq_bits_data_15}, {_push_q_io_deq_bits_data_14}, {_push_q_io_deq_bits_data_13}, {_push_q_io_deq_bits_data_12}, {_push_q_io_deq_bits_data_11}, {_push_q_io_deq_bits_data_10}, {_push_q_io_deq_bits_data_9}, {_push_q_io_deq_bits_data_8}, {_push_q_io_deq_bits_data_7}, {_push_q_io_deq_bits_data_6}, {_push_q_io_deq_bits_data_5}, {_push_q_io_deq_bits_data_4}, {_push_q_io_deq_bits_data_3}, {_push_q_io_deq_bits_data_2}, {_push_q_io_deq_bits_data_1}, {_push_q_io_deq_bits_data_0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}}; // @[Compactor.scala:16:7, :29:24, :64:44] wire [4:0] _push_shr_pop_T_6 = _GEN_0 - 5'hF - count; // @[Compactor.scala:13:44, :45:22, :61:{105,126}] wire [14:0] _GEN_3 = '{1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0}; wire [13:0] _GEN_4 = '{1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0}; wire [12:0] _GEN_5 = '{1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0}; wire [11:0] _GEN_6 = '{1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0}; wire [10:0] _GEN_7 = '{1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0}; wire [9:0] _GEN_8 = '{1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0}; wire [8:0] _GEN_9 = '{1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0}; wire [7:0] _GEN_10 = '{1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0}; wire [6:0] _GEN_11 = '{1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0}; wire [5:0] _GEN_12 = '{1'h0, 1'h0, 1'h0, 1'h0, 1'h0, 1'h0}; wire [4:0] _GEN_13 = '{1'h0, 1'h0, 1'h0, 1'h0, 1'h0}; wire [3:0] _GEN_14 = '{1'h0, 1'h0, 1'h0, 1'h0}; wire [2:0] _GEN_15 = '{1'h0, 1'h0, 1'h0}; wire [1:0] _GEN_16 = '{1'h0, 1'h0}; wire [0:0] _GEN_17 = '{1'h0}; wire _count_T = push_q_io_deq_ready & _push_q_io_deq_valid; // @[Decoupled.scala:51:35] wire _count_T_7 = (|_io_pop_ready_T_5) & io_pop_valid; // @[Decoupled.scala:51:35] wire [31:0] _GEN_18 = {_GEN_17, {{_push_q_io_deq_bits_data_15}, {_push_q_io_deq_bits_data_14}, {_push_q_io_deq_bits_data_13}, {_push_q_io_deq_bits_data_12}, {_push_q_io_deq_bits_data_11}, {_push_q_io_deq_bits_data_10}, {_push_q_io_deq_bits_data_9}, {_push_q_io_deq_bits_data_8}, {_push_q_io_deq_bits_data_7}, {_push_q_io_deq_bits_data_6}, {_push_q_io_deq_bits_data_5}, {_push_q_io_deq_bits_data_4}, {_push_q_io_deq_bits_data_3}, {_push_q_io_deq_bits_data_2}, {_push_q_io_deq_bits_data_1}, {_push_q_io_deq_bits_data_0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}}}; // @[Compactor.scala:16:7, :29:24, :64:44] wire [31:0] _GEN_19 = {_GEN_16, {{_push_q_io_deq_bits_data_15}, {_push_q_io_deq_bits_data_14}, {_push_q_io_deq_bits_data_13}, {_push_q_io_deq_bits_data_12}, {_push_q_io_deq_bits_data_11}, {_push_q_io_deq_bits_data_10}, {_push_q_io_deq_bits_data_9}, {_push_q_io_deq_bits_data_8}, {_push_q_io_deq_bits_data_7}, 
{_push_q_io_deq_bits_data_6}, {_push_q_io_deq_bits_data_5}, {_push_q_io_deq_bits_data_4}, {_push_q_io_deq_bits_data_3}, {_push_q_io_deq_bits_data_2}, {_push_q_io_deq_bits_data_1}, {_push_q_io_deq_bits_data_0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}}}; // @[Compactor.scala:16:7, :29:24, :64:44] wire [31:0] _GEN_20 = {_GEN_15, {{_push_q_io_deq_bits_data_15}, {_push_q_io_deq_bits_data_14}, {_push_q_io_deq_bits_data_13}, {_push_q_io_deq_bits_data_12}, {_push_q_io_deq_bits_data_11}, {_push_q_io_deq_bits_data_10}, {_push_q_io_deq_bits_data_9}, {_push_q_io_deq_bits_data_8}, {_push_q_io_deq_bits_data_7}, {_push_q_io_deq_bits_data_6}, {_push_q_io_deq_bits_data_5}, {_push_q_io_deq_bits_data_4}, {_push_q_io_deq_bits_data_3}, {_push_q_io_deq_bits_data_2}, {_push_q_io_deq_bits_data_1}, {_push_q_io_deq_bits_data_0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}}}; // @[Compactor.scala:16:7, :29:24, :64:44] wire [31:0] _GEN_21 = {_GEN_14, {{_push_q_io_deq_bits_data_15}, {_push_q_io_deq_bits_data_14}, {_push_q_io_deq_bits_data_13}, {_push_q_io_deq_bits_data_12}, {_push_q_io_deq_bits_data_11}, {_push_q_io_deq_bits_data_10}, {_push_q_io_deq_bits_data_9}, {_push_q_io_deq_bits_data_8}, {_push_q_io_deq_bits_data_7}, {_push_q_io_deq_bits_data_6}, {_push_q_io_deq_bits_data_5}, {_push_q_io_deq_bits_data_4}, {_push_q_io_deq_bits_data_3}, {_push_q_io_deq_bits_data_2}, {_push_q_io_deq_bits_data_1}, {_push_q_io_deq_bits_data_0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}}}; // @[Compactor.scala:16:7, :29:24, :64:44] wire [31:0] _GEN_22 = {_GEN_13, {{_push_q_io_deq_bits_data_15}, {_push_q_io_deq_bits_data_14}, {_push_q_io_deq_bits_data_13}, {_push_q_io_deq_bits_data_12}, {_push_q_io_deq_bits_data_11}, {_push_q_io_deq_bits_data_10}, {_push_q_io_deq_bits_data_9}, {_push_q_io_deq_bits_data_8}, {_push_q_io_deq_bits_data_7}, {_push_q_io_deq_bits_data_6}, {_push_q_io_deq_bits_data_5}, {_push_q_io_deq_bits_data_4}, {_push_q_io_deq_bits_data_3}, {_push_q_io_deq_bits_data_2}, {_push_q_io_deq_bits_data_1}, {_push_q_io_deq_bits_data_0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}}}; // @[Compactor.scala:16:7, :29:24, :64:44] wire [31:0] _GEN_23 = {_GEN_12, {{_push_q_io_deq_bits_data_15}, {_push_q_io_deq_bits_data_14}, {_push_q_io_deq_bits_data_13}, {_push_q_io_deq_bits_data_12}, {_push_q_io_deq_bits_data_11}, {_push_q_io_deq_bits_data_10}, {_push_q_io_deq_bits_data_9}, {_push_q_io_deq_bits_data_8}, {_push_q_io_deq_bits_data_7}, {_push_q_io_deq_bits_data_6}, {_push_q_io_deq_bits_data_5}, {_push_q_io_deq_bits_data_4}, {_push_q_io_deq_bits_data_3}, {_push_q_io_deq_bits_data_2}, {_push_q_io_deq_bits_data_1}, {_push_q_io_deq_bits_data_0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}}}; // @[Compactor.scala:16:7, :29:24, :64:44] wire [31:0] _GEN_24 = {_GEN_11, {{_push_q_io_deq_bits_data_15}, {_push_q_io_deq_bits_data_14}, {_push_q_io_deq_bits_data_13}, {_push_q_io_deq_bits_data_12}, {_push_q_io_deq_bits_data_11}, {_push_q_io_deq_bits_data_10}, {_push_q_io_deq_bits_data_9}, {_push_q_io_deq_bits_data_8}, {_push_q_io_deq_bits_data_7}, {_push_q_io_deq_bits_data_6}, {_push_q_io_deq_bits_data_5}, {_push_q_io_deq_bits_data_4}, {_push_q_io_deq_bits_data_3}, {_push_q_io_deq_bits_data_2}, {_push_q_io_deq_bits_data_1}, {_push_q_io_deq_bits_data_0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, 
{1'h0}, {1'h0}, {1'h0}, {1'h0}}}; // @[Compactor.scala:16:7, :29:24, :64:44] wire [31:0] _GEN_25 = {_GEN_10, {{_push_q_io_deq_bits_data_15}, {_push_q_io_deq_bits_data_14}, {_push_q_io_deq_bits_data_13}, {_push_q_io_deq_bits_data_12}, {_push_q_io_deq_bits_data_11}, {_push_q_io_deq_bits_data_10}, {_push_q_io_deq_bits_data_9}, {_push_q_io_deq_bits_data_8}, {_push_q_io_deq_bits_data_7}, {_push_q_io_deq_bits_data_6}, {_push_q_io_deq_bits_data_5}, {_push_q_io_deq_bits_data_4}, {_push_q_io_deq_bits_data_3}, {_push_q_io_deq_bits_data_2}, {_push_q_io_deq_bits_data_1}, {_push_q_io_deq_bits_data_0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}}}; // @[Compactor.scala:16:7, :29:24, :64:44] wire [31:0] _GEN_26 = {_GEN_9, {{_push_q_io_deq_bits_data_15}, {_push_q_io_deq_bits_data_14}, {_push_q_io_deq_bits_data_13}, {_push_q_io_deq_bits_data_12}, {_push_q_io_deq_bits_data_11}, {_push_q_io_deq_bits_data_10}, {_push_q_io_deq_bits_data_9}, {_push_q_io_deq_bits_data_8}, {_push_q_io_deq_bits_data_7}, {_push_q_io_deq_bits_data_6}, {_push_q_io_deq_bits_data_5}, {_push_q_io_deq_bits_data_4}, {_push_q_io_deq_bits_data_3}, {_push_q_io_deq_bits_data_2}, {_push_q_io_deq_bits_data_1}, {_push_q_io_deq_bits_data_0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}}}; // @[Compactor.scala:16:7, :29:24, :64:44] wire [31:0] _GEN_27 = {_GEN_8, {{_push_q_io_deq_bits_data_15}, {_push_q_io_deq_bits_data_14}, {_push_q_io_deq_bits_data_13}, {_push_q_io_deq_bits_data_12}, {_push_q_io_deq_bits_data_11}, {_push_q_io_deq_bits_data_10}, {_push_q_io_deq_bits_data_9}, {_push_q_io_deq_bits_data_8}, {_push_q_io_deq_bits_data_7}, {_push_q_io_deq_bits_data_6}, {_push_q_io_deq_bits_data_5}, {_push_q_io_deq_bits_data_4}, {_push_q_io_deq_bits_data_3}, {_push_q_io_deq_bits_data_2}, {_push_q_io_deq_bits_data_1}, {_push_q_io_deq_bits_data_0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}}}; // @[Compactor.scala:16:7, :29:24, :64:44] wire [31:0] _GEN_28 = {_GEN_7, {{_push_q_io_deq_bits_data_15}, {_push_q_io_deq_bits_data_14}, {_push_q_io_deq_bits_data_13}, {_push_q_io_deq_bits_data_12}, {_push_q_io_deq_bits_data_11}, {_push_q_io_deq_bits_data_10}, {_push_q_io_deq_bits_data_9}, {_push_q_io_deq_bits_data_8}, {_push_q_io_deq_bits_data_7}, {_push_q_io_deq_bits_data_6}, {_push_q_io_deq_bits_data_5}, {_push_q_io_deq_bits_data_4}, {_push_q_io_deq_bits_data_3}, {_push_q_io_deq_bits_data_2}, {_push_q_io_deq_bits_data_1}, {_push_q_io_deq_bits_data_0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}}}; // @[Compactor.scala:16:7, :29:24, :64:44] wire [31:0] _GEN_29 = {_GEN_6, {{_push_q_io_deq_bits_data_15}, {_push_q_io_deq_bits_data_14}, {_push_q_io_deq_bits_data_13}, {_push_q_io_deq_bits_data_12}, {_push_q_io_deq_bits_data_11}, {_push_q_io_deq_bits_data_10}, {_push_q_io_deq_bits_data_9}, {_push_q_io_deq_bits_data_8}, {_push_q_io_deq_bits_data_7}, {_push_q_io_deq_bits_data_6}, {_push_q_io_deq_bits_data_5}, {_push_q_io_deq_bits_data_4}, {_push_q_io_deq_bits_data_3}, {_push_q_io_deq_bits_data_2}, {_push_q_io_deq_bits_data_1}, {_push_q_io_deq_bits_data_0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}}}; // @[Compactor.scala:16:7, :29:24, :64:44] wire [31:0] _GEN_30 = {_GEN_5, {{_push_q_io_deq_bits_data_15}, {_push_q_io_deq_bits_data_14}, {_push_q_io_deq_bits_data_13}, {_push_q_io_deq_bits_data_12}, {_push_q_io_deq_bits_data_11}, {_push_q_io_deq_bits_data_10}, {_push_q_io_deq_bits_data_9}, {_push_q_io_deq_bits_data_8}, {_push_q_io_deq_bits_data_7}, {_push_q_io_deq_bits_data_6}, {_push_q_io_deq_bits_data_5}, {_push_q_io_deq_bits_data_4}, 
{_push_q_io_deq_bits_data_3}, {_push_q_io_deq_bits_data_2}, {_push_q_io_deq_bits_data_1}, {_push_q_io_deq_bits_data_0}, {1'h0}, {1'h0}, {1'h0}}}; // @[Compactor.scala:16:7, :29:24, :64:44] wire [31:0] _GEN_31 = {_GEN_4, {{_push_q_io_deq_bits_data_15}, {_push_q_io_deq_bits_data_14}, {_push_q_io_deq_bits_data_13}, {_push_q_io_deq_bits_data_12}, {_push_q_io_deq_bits_data_11}, {_push_q_io_deq_bits_data_10}, {_push_q_io_deq_bits_data_9}, {_push_q_io_deq_bits_data_8}, {_push_q_io_deq_bits_data_7}, {_push_q_io_deq_bits_data_6}, {_push_q_io_deq_bits_data_5}, {_push_q_io_deq_bits_data_4}, {_push_q_io_deq_bits_data_3}, {_push_q_io_deq_bits_data_2}, {_push_q_io_deq_bits_data_1}, {_push_q_io_deq_bits_data_0}, {1'h0}, {1'h0}}}; // @[Compactor.scala:16:7, :29:24, :64:44] wire [31:0] _GEN_32 = {_GEN_3, {{_push_q_io_deq_bits_data_15}, {_push_q_io_deq_bits_data_14}, {_push_q_io_deq_bits_data_13}, {_push_q_io_deq_bits_data_12}, {_push_q_io_deq_bits_data_11}, {_push_q_io_deq_bits_data_10}, {_push_q_io_deq_bits_data_9}, {_push_q_io_deq_bits_data_8}, {_push_q_io_deq_bits_data_7}, {_push_q_io_deq_bits_data_6}, {_push_q_io_deq_bits_data_5}, {_push_q_io_deq_bits_data_4}, {_push_q_io_deq_bits_data_3}, {_push_q_io_deq_bits_data_2}, {_push_q_io_deq_bits_data_1}, {_push_q_io_deq_bits_data_0}, {1'h0}}}; // @[Compactor.scala:16:7, :29:24, :64:44] always @(posedge clock) begin // @[Compactor.scala:16:7] if (reset) // @[Compactor.scala:16:7] count <= 5'h0; // @[Compactor.scala:45:22] else if (_count_T | _count_T_7) // @[Decoupled.scala:51:35] count <= count + (_count_T ? (_count_T_1 ? 5'h10 : _GEN) - _GEN_0 : 5'h0) - {4'h0, _count_T_7}; // @[Decoupled.scala:51:35] if (_count_T_7) begin // @[Decoupled.scala:51:35] regs_0 <= _valid_T_1[1] ? regs_1 : _GEN_2[_push_shr_pop_T_6]; // @[Compactor.scala:46:35, :47:30, :61:126, :64:{44,54}] regs_1 <= _valid_T_1[2] ? regs_2 : _GEN_18[_push_shr_pop_T_6]; // @[Compactor.scala:46:35, :47:30, :61:126, :64:{44,54}] regs_2 <= _valid_T_1[3] ? regs_3 : _GEN_19[_push_shr_pop_T_6]; // @[Compactor.scala:46:35, :47:30, :61:126, :64:{44,54}] regs_3 <= _valid_T_1[4] ? regs_4 : _GEN_20[_push_shr_pop_T_6]; // @[Compactor.scala:46:35, :47:30, :61:126, :64:{44,54}] regs_4 <= _valid_T_1[5] ? regs_5 : _GEN_21[_push_shr_pop_T_6]; // @[Compactor.scala:46:35, :47:30, :61:126, :64:{44,54}] regs_5 <= _valid_T_1[6] ? regs_6 : _GEN_22[_push_shr_pop_T_6]; // @[Compactor.scala:46:35, :47:30, :61:126, :64:{44,54}] regs_6 <= _valid_T_1[7] ? regs_7 : _GEN_23[_push_shr_pop_T_6]; // @[Compactor.scala:46:35, :47:30, :61:126, :64:{44,54}] regs_7 <= _valid_T_1[8] ? regs_8 : _GEN_24[_push_shr_pop_T_6]; // @[Compactor.scala:46:35, :47:30, :61:126, :64:{44,54}] regs_8 <= _valid_T_1[9] ? regs_9 : _GEN_25[_push_shr_pop_T_6]; // @[Compactor.scala:46:35, :47:30, :61:126, :64:{44,54}] regs_9 <= _valid_T_1[10] ? regs_10 : _GEN_26[_push_shr_pop_T_6]; // @[Compactor.scala:46:35, :47:30, :61:126, :64:{44,54}] regs_10 <= _valid_T_1[11] ? regs_11 : _GEN_27[_push_shr_pop_T_6]; // @[Compactor.scala:46:35, :47:30, :61:126, :64:{44,54}] regs_11 <= _valid_T_1[12] ? regs_12 : _GEN_28[_push_shr_pop_T_6]; // @[Compactor.scala:46:35, :47:30, :61:126, :64:{44,54}] regs_12 <= _valid_T_1[13] ? regs_13 : _GEN_29[_push_shr_pop_T_6]; // @[Compactor.scala:46:35, :47:30, :61:126, :64:{44,54}] regs_13 <= _valid_T_1[14] ? regs_14 : _GEN_30[_push_shr_pop_T_6]; // @[Compactor.scala:46:35, :47:30, :61:126, :64:{44,54}] regs_14 <= _valid_T_1[15] ? 
regs_15 : _GEN_31[_push_shr_pop_T_6]; // @[Compactor.scala:46:35, :47:30, :61:126, :64:{44,54}] if (_valid_T_1[16]) begin // @[Compactor.scala:47:30, :64:54] end else // @[Compactor.scala:64:54] regs_15 <= _GEN_32[_push_shr_pop_T_6]; // @[Compactor.scala:46:35, :61:126, :64:44] end else begin // @[Decoupled.scala:51:35] if (_count_T & ~(_valid_T_1[0])) // @[Decoupled.scala:51:35] regs_0 <= _GEN_2[_push_shr_T_1]; // @[Compactor.scala:46:35, :60:105, :64:44, :67:15] if (_count_T & ~(_valid_T_1[1])) // @[Decoupled.scala:51:35] regs_1 <= _GEN_18[_push_shr_T_1]; // @[Compactor.scala:46:35, :60:105, :64:44, :67:15] if (_count_T & ~(_valid_T_1[2])) // @[Decoupled.scala:51:35] regs_2 <= _GEN_19[_push_shr_T_1]; // @[Compactor.scala:46:35, :60:105, :64:44, :67:15] if (_count_T & ~(_valid_T_1[3])) // @[Decoupled.scala:51:35] regs_3 <= _GEN_20[_push_shr_T_1]; // @[Compactor.scala:46:35, :60:105, :64:44, :67:15] if (_count_T & ~(_valid_T_1[4])) // @[Decoupled.scala:51:35] regs_4 <= _GEN_21[_push_shr_T_1]; // @[Compactor.scala:46:35, :60:105, :64:44, :67:15] if (_count_T & ~(_valid_T_1[5])) // @[Decoupled.scala:51:35] regs_5 <= _GEN_22[_push_shr_T_1]; // @[Compactor.scala:46:35, :60:105, :64:44, :67:15] if (_count_T & ~(_valid_T_1[6])) // @[Decoupled.scala:51:35] regs_6 <= _GEN_23[_push_shr_T_1]; // @[Compactor.scala:46:35, :60:105, :64:44, :67:15] if (_count_T & ~(_valid_T_1[7])) // @[Decoupled.scala:51:35] regs_7 <= _GEN_24[_push_shr_T_1]; // @[Compactor.scala:46:35, :60:105, :64:44, :67:15] if (_count_T & ~(_valid_T_1[8])) // @[Decoupled.scala:51:35] regs_8 <= _GEN_25[_push_shr_T_1]; // @[Compactor.scala:46:35, :60:105, :64:44, :67:15] if (_count_T & ~(_valid_T_1[9])) // @[Decoupled.scala:51:35] regs_9 <= _GEN_26[_push_shr_T_1]; // @[Compactor.scala:46:35, :60:105, :64:44, :67:15] if (_count_T & ~(_valid_T_1[10])) // @[Decoupled.scala:51:35] regs_10 <= _GEN_27[_push_shr_T_1]; // @[Compactor.scala:46:35, :60:105, :64:44, :67:15] if (_count_T & ~(_valid_T_1[11])) // @[Decoupled.scala:51:35] regs_11 <= _GEN_28[_push_shr_T_1]; // @[Compactor.scala:46:35, :60:105, :64:44, :67:15] if (_count_T & ~(_valid_T_1[12])) // @[Decoupled.scala:51:35] regs_12 <= _GEN_29[_push_shr_T_1]; // @[Compactor.scala:46:35, :60:105, :64:44, :67:15] if (_count_T & ~(_valid_T_1[13])) // @[Decoupled.scala:51:35] regs_13 <= _GEN_30[_push_shr_T_1]; // @[Compactor.scala:46:35, :60:105, :64:44, :67:15] if (_count_T & ~(_valid_T_1[14])) // @[Decoupled.scala:51:35] regs_14 <= _GEN_31[_push_shr_T_1]; // @[Compactor.scala:46:35, :60:105, :64:44, :67:15] if (_count_T & ~(_valid_T_1[15])) // @[Decoupled.scala:51:35] regs_15 <= _GEN_32[_push_shr_T_1]; // @[Compactor.scala:46:35, :60:105, :64:44, :67:15] end always @(posedge)
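Before the next example, a minimal instantiation sketch for the Compactor defined above. The parameter values are inferred from the printed Compactor_2 ports (sixteen 1-bit push lanes, a single pop lane, and an internal push_q, i.e. forward = false) rather than taken from the source, so treat them as an assumption.

// Instantiation sketch only; pushN/popN/gen/forward are inferred from the
// Compactor_2 port list above, not given anywhere in this document.
import chisel3._
import chisel3.util._
import saturn.common.{Compactor, CompactorReq}

class CompactorWrapper extends Module {
  val io = IO(new Bundle {
    val push      = Flipped(Decoupled(new CompactorReq(16)))
    val push_data = Input(Vec(16, Bool()))
    val pop       = Flipped(Decoupled(new CompactorReq(1)))
    val pop_data  = Output(Vec(1, Bool()))
  })
  val c = Module(new Compactor(pushN = 16, popN = 1, gen = Bool(), forward = false))
  c.io.push      <> io.push
  c.io.push_data := io.push_data
  c.io.pop       <> io.pop
  io.pop_data    := c.io.pop_data
}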
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerShiftReg_w1_d3_i0_167( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_291 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
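For reference, a minimal usage sketch of the synchronizer helpers defined in SynchronizerReg.scala above. The wrapper module and signal names here are hypothetical; only the AsyncResetSynchronizerShiftReg apply signature is taken from the source.

import chisel3._

// Hypothetical wrapper (not part of the sources above): brings a signal from
// another clock domain into the local domain through a 3-deep synchronizer
// whose internal flops use an asynchronous reset, as implemented by
// AsyncResetSynchronizerShiftReg above.
class IrqSynchronizer extends Module {
  val io = IO(new Bundle {
    val irq_async = Input(Bool())   // signal originating in another clock domain
    val irq_sync  = Output(Bool())  // synchronized into this module's clock domain
  })
  // sync = 3 flops deep, init = 0 reset value; assumes the helper object is in scope.
  io.irq_sync := AsyncResetSynchronizerShiftReg(io.irq_async, sync = 3, init = 0)
}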
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
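The rounding right-shift in SIntArithmetic.>> above packs its rounding decision into three bits. The plain-Scala model below (the function name and example values are illustrative assumptions, not part of the Chisel sources) mirrors the point_five / zeros / ones_digit expression: round to nearest, with an exact .5 tie broken by the least-significant bit of the truncated result.

// Software model of the SInt rounding shift; illustration only, not the hardware.
def roundingShift(x: Int, u: Int): Int =
  if (u == 0) x
  else {
    val pointFive = (x >> (u - 1)) & 1                                     // first discarded bit
    val zeros     = if (u > 1 && (x & ((1 << (u - 1)) - 1)) != 0) 1 else 0 // any lower discarded bits set?
    val onesDigit = (x >> u) & 1                                           // LSB of the truncated result
    (x >> u) + (pointFive & (zeros | onesDigit))
  }

// roundingShift(7, 1)  == 4   (3.5 rounds up)
// roundingShift(5, 1)  == 2   (2.5 is a tie; the even value is kept)
// roundingShift(-5, 1) == -2  (-2.5 is a tie; the even value is kept)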
module PE_404( // @[PE.scala:31:7] input clock, // @[PE.scala:31:7] input reset, // @[PE.scala:31:7] input [7:0] io_in_a, // @[PE.scala:35:14] input [19:0] io_in_b, // @[PE.scala:35:14] input [19:0] io_in_d, // @[PE.scala:35:14] output [7:0] io_out_a, // @[PE.scala:35:14] output [19:0] io_out_b, // @[PE.scala:35:14] output [19:0] io_out_c, // @[PE.scala:35:14] input io_in_control_dataflow, // @[PE.scala:35:14] input io_in_control_propagate, // @[PE.scala:35:14] input [4:0] io_in_control_shift, // @[PE.scala:35:14] output io_out_control_dataflow, // @[PE.scala:35:14] output io_out_control_propagate, // @[PE.scala:35:14] output [4:0] io_out_control_shift, // @[PE.scala:35:14] input [2:0] io_in_id, // @[PE.scala:35:14] output [2:0] io_out_id, // @[PE.scala:35:14] input io_in_last, // @[PE.scala:35:14] output io_out_last, // @[PE.scala:35:14] input io_in_valid, // @[PE.scala:35:14] output io_out_valid // @[PE.scala:35:14] ); wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7] wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7] wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7] wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7] wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7] wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7] wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7] wire io_in_last_0 = io_in_last; // @[PE.scala:31:7] wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7] wire io_bad_dataflow = 1'h0; // @[PE.scala:31:7] wire _io_out_c_T_5 = 1'h0; // @[Arithmetic.scala:125:33] wire _io_out_c_T_6 = 1'h0; // @[Arithmetic.scala:125:60] wire _io_out_c_T_16 = 1'h0; // @[Arithmetic.scala:125:33] wire _io_out_c_T_17 = 1'h0; // @[Arithmetic.scala:125:60] wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7] wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37] wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37] wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35] wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7] wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7] wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7] wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7] wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7] wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7] wire [19:0] io_out_b_0; // @[PE.scala:31:7] wire [19:0] io_out_c_0; // @[PE.scala:31:7] reg [7:0] c1; // @[PE.scala:70:15] wire [7:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15] wire [7:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38] reg [7:0] c2; // @[PE.scala:71:15] wire [7:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15] wire [7:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38] reg last_s; // @[PE.scala:89:25] wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21] wire [4:0] shift_offset = flip ? 
io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25] wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25] wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32] wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32] wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25] wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53] wire [7:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15] wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}] wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25] wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27] wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27] wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_7 = {24'h0, _io_out_c_zeros_T_6[7:0] & _io_out_c_zeros_T_1}; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 
32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}] wire [7:0] _GEN_2 = {3'h0, shift_offset}; // @[PE.scala:91:25] wire [7:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15] wire [7:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30] wire [7:0] _io_out_c_T; // @[Arithmetic.scala:107:15] assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33] wire [8:0] _io_out_c_T_2 = {_io_out_c_T[7], _io_out_c_T} + {{7{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}] wire [7:0] _io_out_c_T_3 = _io_out_c_T_2[7:0]; // @[Arithmetic.scala:107:28] wire [7:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28] wire [19:0] _io_out_c_T_7 = {{12{_io_out_c_T_4[7]}}, _io_out_c_T_4}; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_8 = _io_out_c_T_7; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_9 = _io_out_c_T_8; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37] wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37] wire [7:0] _c1_T = io_in_d_0[7:0]; // @[PE.scala:31:7] wire [7:0] _c2_T = io_in_d_0[7:0]; // @[PE.scala:31:7] wire [7:0] _c1_T_1 = _c1_T; // @[Arithmetic.scala:114:{15,33}] wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53] wire [7:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15] wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}] wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_16 = {24'h0, _io_out_c_zeros_T_15[7:0] & _io_out_c_zeros_T_10}; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 
32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}] wire [7:0] _GEN_4 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15] wire [7:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T_1 = _GEN_4; // @[Arithmetic.scala:103:30] wire [7:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15] assign _io_out_c_T_11 = _GEN_4; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33] wire [8:0] _io_out_c_T_13 = {_io_out_c_T_11[7], _io_out_c_T_11} + {{7{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}] wire [7:0] _io_out_c_T_14 = _io_out_c_T_13[7:0]; // @[Arithmetic.scala:107:28] wire [7:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28] wire [19:0] _io_out_c_T_18 = {{12{_io_out_c_T_15[7]}}, _io_out_c_T_15}; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_19 = _io_out_c_T_18; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_20 = _io_out_c_T_19; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37] wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37] wire [7:0] _c2_T_1 = _c2_T; // @[Arithmetic.scala:114:{15,33}] wire [7:0] _mac_unit_io_in_b_T_5; // @[PE.scala:121:38] assign _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38] wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5; // @[PE.scala:121:38] assign io_out_c_0 = io_in_control_propagate_0 ? {{12{c1[7]}}, c1} : {{12{c2[7]}}, c2}; // @[PE.scala:31:7, :70:15, :71:15, :119:30, :120:16, :126:16] wire [7:0] _mac_unit_io_in_b_T_7; // @[PE.scala:127:38] assign _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38] wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7; // @[PE.scala:127:38] wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35] wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35] always @(posedge clock) begin // @[PE.scala:31:7] if (io_in_valid_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :102:95, :141:17, :142:8] c1 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :70:15] if (~(~io_in_valid_0 | io_in_control_propagate_0)) // @[PE.scala:31:7, :71:15, :102:95, :119:30, :130:10, :141:{9,17}, :143:8] c2 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :71:15] if (io_in_valid_0) // @[PE.scala:31:7] last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25] end // always @(posedge) MacUnit_148 mac_unit ( // @[PE.scala:64:24] .clock (clock), .reset (reset), .io_in_a (io_in_a_0), // @[PE.scala:31:7] .io_in_b (io_in_control_propagate_0 ?
_mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3), // @[PE.scala:31:7, :119:30, :121:{24,38}, :127:{24,38}] .io_in_c (io_in_b_0), // @[PE.scala:31:7] .io_out_d (io_out_b_0) ); // @[PE.scala:64:24] assign io_out_a = io_out_a_0; // @[PE.scala:31:7] assign io_out_b = io_out_b_0; // @[PE.scala:31:7] assign io_out_c = io_out_c_0; // @[PE.scala:31:7] assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7] assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7] assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7] assign io_out_id = io_out_id_0; // @[PE.scala:31:7] assign io_out_last = io_out_last_0; // @[PE.scala:31:7] assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7] endmodule
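A minimal instantiation sketch for the PE above. The wrapper name, dataflow choice and type widths are assumptions chosen to be consistent with the port widths of PE_404, not a statement of the exact parameters used to generate it.

package gemmini

import chisel3._

// Hypothetical top level: one weight-stationary PE with 8-bit inputs, a 20-bit
// b/d partial-sum type and a 32-bit accumulator type. The implicit
// Arithmetic[SInt] instance is picked up from Arithmetic.scala above.
class SinglePE extends Module {
  val pe = Module(new PE(SInt(8.W), SInt(20.W), SInt(32.W),
                         Dataflow.WS, max_simultaneous_matmuls = 8))
  val io = IO(chiselTypeOf(pe.io))  // mirror the PE's port list at this level
  io <> pe.io
}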
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed version of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + " is already pending (not received until current cycle) for a prior request message" + " with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + " same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + " same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be the same as a_size of the corresponding request" + " message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + " request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } // This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
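    // Worked example of the in-flight bookkeeping above: each source ID owns a
    // (1 << log_a_opcode_bus_size)-bit slot of inflight_opcodes holding (opcode << 1) | 1,
    // so an all-zero slot means "nothing outstanding". E.g. a Get (opcode 4) from source 2
    // stores (4 << 1) | 1 = 0b1001 at bit offset 2 << 2 = 8, and a_opcode_lookup recovers it
    // as ((inflight_opcodes >> 8) & 0xF) >> 1 = 4. inflight_sizes uses the same scheme with
    // (1 << log_a_size_bus_size)-bit slots.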
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s from high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalone diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // A potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, 
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there exist C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there exist D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
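The two AddressSet helpers reproduced from Parameters.scala above, enumerateBits and enumerateMask, are plain Scala functions with no hardware types in them, so they can be exercised directly in a REPL. The sketch below copies them verbatim; the AddressSetDemo wrapper and the sample masks are only illustrative and are not part of the source files.

object AddressSetDemo extends App {
  // Isolate each set bit of `mask`, lowest bit first (copied from AddressSet.enumerateBits).
  def enumerateBits(mask: BigInt): Seq[BigInt] = {
    def helper(x: BigInt): Seq[BigInt] =
      if (x == 0) Nil
      else {
        val bit = x & (-x)      // lowest set bit
        bit +: helper(x & ~bit) // clear it and recurse on the rest
      }
    helper(mask)
  }

  // Enumerate every value whose set bits lie inside `mask`, in increasing order
  // (copied from AddressSet.enumerateMask).
  def enumerateMask(mask: BigInt): Seq[BigInt] = {
    def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
      if (id == mask) (id +: tail).reverse
      else helper(((~mask | id) + 1) & mask, id +: tail)
    helper(0, Nil)
  }

  println(enumerateBits(BigInt(0xA))) // List(2, 8)
  println(enumerateMask(BigInt(0x5))) // List(0, 1, 4, 5)
}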
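Similarly, the numBeats1 decode in TLEdge above reduces, for messages that carry data and sizes up to maxLgSize, to ((1 << lgSize) - 1) >> log2(beatBytes). A quick plain-Scala check of that arithmetic, assuming the 8-byte beat implied by the 64-bit data channel of the monitor below (the NumBeatsDemo wrapper and the printed format are only illustrative):

object NumBeatsDemo extends App {
  val beatBytes = 8                                        // 64-bit data channel => 8 bytes per beat
  val log2Beat  = Integer.numberOfTrailingZeros(beatBytes) // log2Ceil(beatBytes) for a power of two

  // beats minus one for a data-carrying message of 2^lgSize bytes
  def beats1(lgSize: Int): Int = ((1 << lgSize) - 1) >> log2Beat

  for (lgSize <- 0 to 6) {
    val bytes = 1 << lgSize
    println(s"lgSize=$lgSize bytes=$bytes beats=${beats1(lgSize) + 1}")
  }
  // Expected: 1..8 bytes -> 1 beat, 16 bytes -> 2, 32 bytes -> 4, 64 bytes -> 8
}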
module TLMonitor_36( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [5:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_b_ready, // @[Monitor.scala:20:14] input io_in_b_valid, // @[Monitor.scala:20:14] input [1:0] io_in_b_bits_param, // @[Monitor.scala:20:14] input [5:0] io_in_b_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_b_bits_address, // @[Monitor.scala:20:14] input io_in_c_ready, // @[Monitor.scala:20:14] input io_in_c_valid, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_size, // @[Monitor.scala:20:14] input [5:0] io_in_c_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_c_bits_address, // @[Monitor.scala:20:14] input [63:0] io_in_c_bits_data, // @[Monitor.scala:20:14] input io_in_c_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [5:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt, // @[Monitor.scala:20:14] input io_in_e_valid, // @[Monitor.scala:20:14] input [2:0] io_in_e_bits_sink // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [5:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_b_ready_0 = io_in_b_ready; // @[Monitor.scala:36:7] wire io_in_b_valid_0 = io_in_b_valid; // @[Monitor.scala:36:7] wire [1:0] io_in_b_bits_param_0 = io_in_b_bits_param; // @[Monitor.scala:36:7] wire [5:0] io_in_b_bits_source_0 = io_in_b_bits_source; // @[Monitor.scala:36:7] wire [31:0] io_in_b_bits_address_0 = io_in_b_bits_address; // @[Monitor.scala:36:7] wire io_in_c_ready_0 = io_in_c_ready; // @[Monitor.scala:36:7] wire io_in_c_valid_0 = io_in_c_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_c_bits_opcode_0 = io_in_c_bits_opcode; // 
@[Monitor.scala:36:7] wire [2:0] io_in_c_bits_param_0 = io_in_c_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_c_bits_size_0 = io_in_c_bits_size; // @[Monitor.scala:36:7] wire [5:0] io_in_c_bits_source_0 = io_in_c_bits_source; // @[Monitor.scala:36:7] wire [31:0] io_in_c_bits_address_0 = io_in_c_bits_address; // @[Monitor.scala:36:7] wire [63:0] io_in_c_bits_data_0 = io_in_c_bits_data; // @[Monitor.scala:36:7] wire io_in_c_bits_corrupt_0 = io_in_c_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [5:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_e_valid_0 = io_in_e_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_e_bits_sink_0 = io_in_e_bits_sink; // @[Monitor.scala:36:7] wire io_in_e_ready = 1'h1; // @[Monitor.scala:36:7] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_63 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_65 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_69 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_71 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_75 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_77 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_81 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_83 = 1'h1; // @[Parameters.scala:57:20] wire mask_sub_sub_sub_0_1_1 = 1'h1; // @[Misc.scala:206:21] wire mask_sub_sub_size_1 = 1'h1; // @[Misc.scala:209:26] wire mask_sub_sub_0_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_sub_1_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_0_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_1_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_2_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_3_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_size_1 = 1'h1; // @[Misc.scala:209:26] wire mask_acc_8 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_9 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_10 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_11 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_12 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_13 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_14 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_15 = 1'h1; // @[Misc.scala:215:29] wire _legal_source_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _legal_source_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _legal_source_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _legal_source_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _legal_source_T_15 = 1'h1; // @[Parameters.scala:56:32] wire 
_legal_source_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _legal_source_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _legal_source_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_123 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_125 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_129 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_131 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_135 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_137 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_141 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_143 = 1'h1; // @[Parameters.scala:57:20] wire _b_first_beats1_opdata_T = 1'h1; // @[Edges.scala:97:37] wire _b_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire b_first_last = 1'h1; // @[Edges.scala:232:33] wire [2:0] io_in_b_bits_opcode = 3'h6; // @[Monitor.scala:36:7] wire [2:0] io_in_b_bits_size = 3'h6; // @[Monitor.scala:36:7] wire [2:0] _mask_sizeOH_T_3 = 3'h6; // @[Misc.scala:202:34] wire [7:0] io_in_b_bits_mask = 8'hFF; // @[Monitor.scala:36:7] wire [7:0] mask_1 = 8'hFF; // @[Misc.scala:222:10] wire [63:0] io_in_b_bits_data = 64'h0; // @[Monitor.scala:36:7] wire io_in_b_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire mask_sub_size_1 = 1'h0; // @[Misc.scala:209:26] wire _mask_sub_acc_T_4 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_5 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_6 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_7 = 1'h0; // @[Misc.scala:215:38] wire _legal_source_T_42 = 1'h0; // @[Mux.scala:30:73] wire b_first_beats1_opdata = 1'h0; // @[Edges.scala:97:28] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [3:0] _mask_sizeOH_T_4 = 4'h4; // @[OneHot.scala:65:12] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // 
@[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [2:0] _mask_sizeOH_T_5 = 3'h4; // @[OneHot.scala:65:27] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] mask_sizeOH_1 = 3'h5; // @[Misc.scala:202:81] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] b_first_beats1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] b_first_count = 3'h0; // @[Edges.scala:234:25] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] b_first_beats1_decode = 3'h7; // @[Edges.scala:220:59] wire [5:0] is_aligned_mask_1 = 6'h3F; // @[package.scala:243:46] wire [5:0] _b_first_beats1_decode_T_2 = 6'h3F; // @[package.scala:243:46] wire [5:0] _is_aligned_mask_T_3 = 6'h0; // @[package.scala:243:76] wire [5:0] _b_first_beats1_decode_T_1 = 6'h0; // @[package.scala:243:76] wire [12:0] _is_aligned_mask_T_2 = 13'hFC0; // @[package.scala:243:71] wire [12:0] _b_first_beats1_decode_T = 13'hFC0; // @[package.scala:243:71] wire [3:0] mask_lo_1 = 4'hF; // @[Misc.scala:222:10] wire [3:0] mask_hi_1 = 4'hF; // @[Misc.scala:222:10] wire [1:0] mask_lo_lo_1 = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_lo_hi_1 = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo_1 = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_hi_hi_1 = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_sizeOH_shiftAmount_1 = 2'h2; // @[OneHot.scala:64:49] wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [5:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_8 
= io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_44 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_45 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_46 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_47 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _legal_source_uncommonBits_T = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _legal_source_uncommonBits_T_1 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _legal_source_uncommonBits_T_2 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _legal_source_uncommonBits_T_3 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] 
_uncommonBits_T_48 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_49 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_50 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_51 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_8 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_9 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_10 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_11 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_52 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_53 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_54 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_55 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_56 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_57 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_58 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_59 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_60 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_61 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_62 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_63 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_64 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_65 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_66 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_67 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_68 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_69 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_70 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_71 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_T = io_in_a_bits_source_0 == 6'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_1 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_7 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_13 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_19 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire _source_ok_T_2 = _source_ok_T_1 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_1 = 
_source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_8 = _source_ok_T_7 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_14 = _source_ok_T_13 == 4'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_20 = _source_ok_T_19 == 4'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire _source_ok_T_25 = io_in_a_bits_source_0 == 6'h3C; // @[Monitor.scala:36:7] wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31] wire _source_ok_T_26 = io_in_a_bits_source_0 == 6'h3E; // @[Monitor.scala:36:7] wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31] wire _source_ok_T_27 = io_in_a_bits_source_0 == 6'h38; // @[Monitor.scala:36:7] wire _source_ok_WIRE_7 = _source_ok_T_27; // @[Parameters.scala:1138:31] wire _source_ok_T_28 = io_in_a_bits_source_0 == 6'h3A; // @[Monitor.scala:36:7] wire _source_ok_WIRE_8 = _source_ok_T_28; // @[Parameters.scala:1138:31] wire _source_ok_T_29 = io_in_a_bits_source_0 == 6'h34; // @[Monitor.scala:36:7] wire _source_ok_WIRE_9 = _source_ok_T_29; // @[Parameters.scala:1138:31] wire _source_ok_T_30 = io_in_a_bits_source_0 == 6'h36; // @[Monitor.scala:36:7] wire _source_ok_WIRE_10 = _source_ok_T_30; // @[Parameters.scala:1138:31] wire _source_ok_T_31 = io_in_a_bits_source_0 == 6'h30; // @[Monitor.scala:36:7] wire _source_ok_WIRE_11 = _source_ok_T_31; // @[Parameters.scala:1138:31] wire _source_ok_T_32 = io_in_a_bits_source_0 == 6'h32; // @[Monitor.scala:36:7] wire _source_ok_WIRE_12 = _source_ok_T_32; // @[Parameters.scala:1138:31] wire _source_ok_T_33 = io_in_a_bits_source_0 == 6'h2C; // @[Monitor.scala:36:7] wire _source_ok_WIRE_13 = _source_ok_T_33; // @[Parameters.scala:1138:31] wire _source_ok_T_34 = io_in_a_bits_source_0 == 6'h2E; // @[Monitor.scala:36:7] wire _source_ok_WIRE_14 = _source_ok_T_34; // @[Parameters.scala:1138:31] wire _source_ok_T_35 = io_in_a_bits_source_0 == 6'h28; // @[Monitor.scala:36:7] wire _source_ok_WIRE_15 = _source_ok_T_35; // @[Parameters.scala:1138:31] wire _source_ok_T_36 = io_in_a_bits_source_0 == 6'h2A; // @[Monitor.scala:36:7] wire _source_ok_WIRE_16 = _source_ok_T_36; // @[Parameters.scala:1138:31] wire _source_ok_T_37 = io_in_a_bits_source_0 == 6'h24; // @[Monitor.scala:36:7] wire _source_ok_WIRE_17 = _source_ok_T_37; // @[Parameters.scala:1138:31] wire _source_ok_T_38 = io_in_a_bits_source_0 == 6'h26; // @[Monitor.scala:36:7] wire _source_ok_WIRE_18 = _source_ok_T_38; // @[Parameters.scala:1138:31] wire _source_ok_T_39 = io_in_a_bits_source_0 == 6'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_19 = _source_ok_T_39; // @[Parameters.scala:1138:31] wire _source_ok_T_40 = io_in_a_bits_source_0 == 6'h22; 
// @[Monitor.scala:36:7] wire _source_ok_WIRE_20 = _source_ok_T_40; // @[Parameters.scala:1138:31] wire _source_ok_T_41 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_42 = _source_ok_T_41 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_43 = _source_ok_T_42 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_44 = _source_ok_T_43 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_45 = _source_ok_T_44 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_46 = _source_ok_T_45 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_47 = _source_ok_T_46 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_48 = _source_ok_T_47 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_49 = _source_ok_T_48 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_50 = _source_ok_T_49 | _source_ok_WIRE_10; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_51 = _source_ok_T_50 | _source_ok_WIRE_11; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_52 = _source_ok_T_51 | _source_ok_WIRE_12; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_53 = _source_ok_T_52 | _source_ok_WIRE_13; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_54 = _source_ok_T_53 | _source_ok_WIRE_14; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_55 = _source_ok_T_54 | _source_ok_WIRE_15; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_56 = _source_ok_T_55 | _source_ok_WIRE_16; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_57 = _source_ok_T_56 | _source_ok_WIRE_17; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_58 = _source_ok_T_57 | _source_ok_WIRE_18; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_59 = _source_ok_T_58 | _source_ok_WIRE_19; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_59 | _source_ok_WIRE_20; // @[Parameters.scala:1138:31, :1139:46] wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [31:0] _is_aligned_T = {26'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = 
mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = 
mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_29 = 
_uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_60 = io_in_d_bits_source_0 == 6'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_60; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_61 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_67 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_73 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_79 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire _source_ok_T_62 = _source_ok_T_61 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_64 = _source_ok_T_62; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_66 = _source_ok_T_64; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_66; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_68 = _source_ok_T_67 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_70 = _source_ok_T_68; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_72 = _source_ok_T_70; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_72; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_74 = _source_ok_T_73 == 4'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_76 = _source_ok_T_74; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_78 = _source_ok_T_76; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_78; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_80 = _source_ok_T_79 == 4'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_82 = _source_ok_T_80; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_84 = _source_ok_T_82; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_4 = _source_ok_T_84; // @[Parameters.scala:1138:31] wire _source_ok_T_85 = io_in_d_bits_source_0 == 6'h3C; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_5 = _source_ok_T_85; 
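  // Note: the _source_ok_T_60.._source_ok_T_100 terms in this region decode
  // io_in_d_bits_source_0 against every legal client source ID: one exact match
  // (6'h10), four 4-wide groups selected on bits [5:2] (covering sources 6'h00-6'h0F),
  // and sixteen individual IDs 6'h20, 6'h22, ..., 6'h3E. Their OR, source_ok_1,
  // backs the D-channel source-legality assertions emitted later in this module.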
// @[Parameters.scala:1138:31] wire _source_ok_T_86 = io_in_d_bits_source_0 == 6'h3E; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_6 = _source_ok_T_86; // @[Parameters.scala:1138:31] wire _source_ok_T_87 = io_in_d_bits_source_0 == 6'h38; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_7 = _source_ok_T_87; // @[Parameters.scala:1138:31] wire _source_ok_T_88 = io_in_d_bits_source_0 == 6'h3A; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_8 = _source_ok_T_88; // @[Parameters.scala:1138:31] wire _source_ok_T_89 = io_in_d_bits_source_0 == 6'h34; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_9 = _source_ok_T_89; // @[Parameters.scala:1138:31] wire _source_ok_T_90 = io_in_d_bits_source_0 == 6'h36; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_10 = _source_ok_T_90; // @[Parameters.scala:1138:31] wire _source_ok_T_91 = io_in_d_bits_source_0 == 6'h30; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_11 = _source_ok_T_91; // @[Parameters.scala:1138:31] wire _source_ok_T_92 = io_in_d_bits_source_0 == 6'h32; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_12 = _source_ok_T_92; // @[Parameters.scala:1138:31] wire _source_ok_T_93 = io_in_d_bits_source_0 == 6'h2C; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_13 = _source_ok_T_93; // @[Parameters.scala:1138:31] wire _source_ok_T_94 = io_in_d_bits_source_0 == 6'h2E; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_14 = _source_ok_T_94; // @[Parameters.scala:1138:31] wire _source_ok_T_95 = io_in_d_bits_source_0 == 6'h28; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_15 = _source_ok_T_95; // @[Parameters.scala:1138:31] wire _source_ok_T_96 = io_in_d_bits_source_0 == 6'h2A; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_16 = _source_ok_T_96; // @[Parameters.scala:1138:31] wire _source_ok_T_97 = io_in_d_bits_source_0 == 6'h24; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_17 = _source_ok_T_97; // @[Parameters.scala:1138:31] wire _source_ok_T_98 = io_in_d_bits_source_0 == 6'h26; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_18 = _source_ok_T_98; // @[Parameters.scala:1138:31] wire _source_ok_T_99 = io_in_d_bits_source_0 == 6'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_19 = _source_ok_T_99; // @[Parameters.scala:1138:31] wire _source_ok_T_100 = io_in_d_bits_source_0 == 6'h22; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_20 = _source_ok_T_100; // @[Parameters.scala:1138:31] wire _source_ok_T_101 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_102 = _source_ok_T_101 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_103 = _source_ok_T_102 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_104 = _source_ok_T_103 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_105 = _source_ok_T_104 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_106 = _source_ok_T_105 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_107 = _source_ok_T_106 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_108 = _source_ok_T_107 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_109 = _source_ok_T_108 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_110 = _source_ok_T_109 | _source_ok_WIRE_1_10; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_111 = _source_ok_T_110 | _source_ok_WIRE_1_11; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_112 = 
_source_ok_T_111 | _source_ok_WIRE_1_12; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_113 = _source_ok_T_112 | _source_ok_WIRE_1_13; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_114 = _source_ok_T_113 | _source_ok_WIRE_1_14; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_115 = _source_ok_T_114 | _source_ok_WIRE_1_15; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_116 = _source_ok_T_115 | _source_ok_WIRE_1_16; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_117 = _source_ok_T_116 | _source_ok_WIRE_1_17; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_118 = _source_ok_T_117 | _source_ok_WIRE_1_18; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_119 = _source_ok_T_118 | _source_ok_WIRE_1_19; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_119 | _source_ok_WIRE_1_20; // @[Parameters.scala:1138:31, :1139:46] wire sink_ok = io_in_d_bits_sink_0 != 3'h7; // @[Monitor.scala:36:7, :309:31] wire _legal_source_T = io_in_b_bits_source_0 == 6'h10; // @[Monitor.scala:36:7] wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _legal_source_T_1 = io_in_b_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _legal_source_T_7 = io_in_b_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _legal_source_T_13 = io_in_b_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _legal_source_T_19 = io_in_b_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}] wire _legal_source_T_25 = io_in_b_bits_source_0 == 6'h3C; // @[Monitor.scala:36:7] wire _legal_source_T_26 = io_in_b_bits_source_0 == 6'h3E; // @[Monitor.scala:36:7] wire _legal_source_T_27 = io_in_b_bits_source_0 == 6'h38; // @[Monitor.scala:36:7] wire _legal_source_T_28 = io_in_b_bits_source_0 == 6'h3A; // @[Monitor.scala:36:7] wire _legal_source_T_29 = io_in_b_bits_source_0 == 6'h34; // @[Monitor.scala:36:7] wire _legal_source_T_30 = io_in_b_bits_source_0 == 6'h36; // @[Monitor.scala:36:7] wire _legal_source_T_31 = io_in_b_bits_source_0 == 6'h30; // @[Monitor.scala:36:7] wire _legal_source_T_32 = io_in_b_bits_source_0 == 6'h32; // @[Monitor.scala:36:7] wire _legal_source_T_33 = io_in_b_bits_source_0 == 6'h2C; // @[Monitor.scala:36:7] wire _legal_source_T_34 = io_in_b_bits_source_0 == 6'h2E; // @[Monitor.scala:36:7] wire _legal_source_T_35 = io_in_b_bits_source_0 == 6'h28; // @[Monitor.scala:36:7] wire _legal_source_T_36 = io_in_b_bits_source_0 == 6'h2A; // @[Monitor.scala:36:7] wire _legal_source_T_37 = io_in_b_bits_source_0 == 6'h24; // @[Monitor.scala:36:7] wire _legal_source_T_38 = io_in_b_bits_source_0 == 6'h26; // @[Monitor.scala:36:7] wire _legal_source_T_39 = io_in_b_bits_source_0 == 6'h20; // @[Monitor.scala:36:7] wire _legal_source_T_40 = io_in_b_bits_source_0 == 6'h22; // @[Monitor.scala:36:7] wire [27:0] _GEN_0 = io_in_b_bits_address_0[27:0] ^ 28'h8000000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T = {io_in_b_bits_address_0[31:28], _GEN_0}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_1 = {1'h0, _address_ok_T}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_2 = _address_ok_T_1 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_3 = _address_ok_T_2; // @[Parameters.scala:137:46] wire 
_address_ok_T_4 = _address_ok_T_3 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_0 = _address_ok_T_4; // @[Parameters.scala:612:40] wire [31:0] _address_ok_T_5 = io_in_b_bits_address_0 ^ 32'h80000000; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_6 = {1'h0, _address_ok_T_5}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_7 = _address_ok_T_6 & 33'h1F0000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_8 = _address_ok_T_7; // @[Parameters.scala:137:46] wire _address_ok_T_9 = _address_ok_T_8 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1 = _address_ok_T_9; // @[Parameters.scala:612:40] wire address_ok = _address_ok_WIRE_0 | _address_ok_WIRE_1; // @[Parameters.scala:612:40, :636:64] wire [31:0] _is_aligned_T_1 = {26'h0, io_in_b_bits_address_0[5:0]}; // @[Monitor.scala:36:7] wire is_aligned_1 = _is_aligned_T_1 == 32'h0; // @[Edges.scala:21:{16,24}] wire mask_sub_sub_bit_1 = io_in_b_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2_1 = mask_sub_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit_1 = ~mask_sub_sub_bit_1; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2_1 = mask_sub_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T_2 = mask_sub_sub_0_2_1; // @[Misc.scala:214:27, :215:38] wire _mask_sub_sub_acc_T_3 = mask_sub_sub_1_2_1; // @[Misc.scala:214:27, :215:38] wire mask_sub_bit_1 = io_in_b_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit_1 = ~mask_sub_bit_1; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2_1 = mask_sub_sub_0_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire mask_sub_1_2_1 = mask_sub_sub_0_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire mask_sub_2_2_1 = mask_sub_sub_1_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire mask_sub_3_2_1 = mask_sub_sub_1_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire mask_bit_1 = io_in_b_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit_1 = ~mask_bit_1; // @[Misc.scala:210:26, :211:20] wire mask_eq_8 = mask_sub_0_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_8 = mask_eq_8; // @[Misc.scala:214:27, :215:38] wire mask_eq_9 = mask_sub_0_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_9 = mask_eq_9; // @[Misc.scala:214:27, :215:38] wire mask_eq_10 = mask_sub_1_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_10 = mask_eq_10; // @[Misc.scala:214:27, :215:38] wire mask_eq_11 = mask_sub_1_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_11 = mask_eq_11; // @[Misc.scala:214:27, :215:38] wire mask_eq_12 = mask_sub_2_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_12 = mask_eq_12; // @[Misc.scala:214:27, :215:38] wire mask_eq_13 = mask_sub_2_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_13 = mask_eq_13; // @[Misc.scala:214:27, :215:38] wire mask_eq_14 = mask_sub_3_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_14 = mask_eq_14; // @[Misc.scala:214:27, :215:38] wire mask_eq_15 = mask_sub_3_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_15 = mask_eq_15; // @[Misc.scala:214:27, :215:38] wire _legal_source_WIRE_0 = _legal_source_T; // @[Parameters.scala:1138:31] wire [1:0] legal_source_uncommonBits = _legal_source_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire _legal_source_T_2 = _legal_source_T_1 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _legal_source_T_4 = _legal_source_T_2; 
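  // Note: this region holds the B-channel legality terms. address_ok ORs the two
  // visible apertures (a 64 KiB window at 0x0800_0000 and a 256 MiB window at
  // 0x8000_0000), is_aligned_1 requires io_in_b_bits_address_0[5:0] == 0
  // (64-byte alignment), the mask_*_1 / mask_eq_8..15 terms rebuild the full byte
  // mask, and the _legal_source_* one-hot mux below reconstructs the canonical
  // client source ID so that legal_source can compare it with io_in_b_bits_source_0.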
// @[Parameters.scala:54:{32,67}] wire _legal_source_T_6 = _legal_source_T_4; // @[Parameters.scala:54:67, :56:48] wire _legal_source_WIRE_1 = _legal_source_T_6; // @[Parameters.scala:1138:31] wire [1:0] legal_source_uncommonBits_1 = _legal_source_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _legal_source_T_8 = _legal_source_T_7 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _legal_source_T_10 = _legal_source_T_8; // @[Parameters.scala:54:{32,67}] wire _legal_source_T_12 = _legal_source_T_10; // @[Parameters.scala:54:67, :56:48] wire _legal_source_WIRE_2 = _legal_source_T_12; // @[Parameters.scala:1138:31] wire [1:0] legal_source_uncommonBits_2 = _legal_source_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _legal_source_T_14 = _legal_source_T_13 == 4'h2; // @[Parameters.scala:54:{10,32}] wire _legal_source_T_16 = _legal_source_T_14; // @[Parameters.scala:54:{32,67}] wire _legal_source_T_18 = _legal_source_T_16; // @[Parameters.scala:54:67, :56:48] wire _legal_source_WIRE_3 = _legal_source_T_18; // @[Parameters.scala:1138:31] wire [1:0] legal_source_uncommonBits_3 = _legal_source_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _legal_source_T_20 = _legal_source_T_19 == 4'h3; // @[Parameters.scala:54:{10,32}] wire _legal_source_T_22 = _legal_source_T_20; // @[Parameters.scala:54:{32,67}] wire _legal_source_T_24 = _legal_source_T_22; // @[Parameters.scala:54:67, :56:48] wire _legal_source_WIRE_4 = _legal_source_T_24; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_5 = _legal_source_T_25; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_6 = _legal_source_T_26; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_7 = _legal_source_T_27; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_8 = _legal_source_T_28; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_9 = _legal_source_T_29; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_10 = _legal_source_T_30; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_11 = _legal_source_T_31; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_12 = _legal_source_T_32; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_13 = _legal_source_T_33; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_14 = _legal_source_T_34; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_15 = _legal_source_T_35; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_16 = _legal_source_T_36; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_17 = _legal_source_T_37; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_18 = _legal_source_T_38; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_19 = _legal_source_T_39; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_20 = _legal_source_T_40; // @[Parameters.scala:1138:31] wire [4:0] _legal_source_T_41 = {_legal_source_WIRE_0, 4'h0}; // @[Mux.scala:30:73] wire [4:0] _legal_source_T_62 = _legal_source_T_41; // @[Mux.scala:30:73] wire [2:0] _legal_source_T_43 = {_legal_source_WIRE_2, 2'h0}; // @[Mux.scala:30:73] wire [3:0] _legal_source_T_44 = {_legal_source_WIRE_3, 3'h0}; // @[Mux.scala:30:73] wire [3:0] _legal_source_T_45 = _legal_source_WIRE_4 ? 4'hC : 4'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_46 = _legal_source_WIRE_5 ? 6'h3C : 6'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_47 = _legal_source_WIRE_6 ? 6'h3E : 6'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_48 = _legal_source_WIRE_7 ? 6'h38 : 6'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_49 = _legal_source_WIRE_8 ? 
6'h3A : 6'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_50 = _legal_source_WIRE_9 ? 6'h34 : 6'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_51 = _legal_source_WIRE_10 ? 6'h36 : 6'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_52 = _legal_source_WIRE_11 ? 6'h30 : 6'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_53 = _legal_source_WIRE_12 ? 6'h32 : 6'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_54 = _legal_source_WIRE_13 ? 6'h2C : 6'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_55 = _legal_source_WIRE_14 ? 6'h2E : 6'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_56 = _legal_source_WIRE_15 ? 6'h28 : 6'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_57 = _legal_source_WIRE_16 ? 6'h2A : 6'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_58 = _legal_source_WIRE_17 ? 6'h24 : 6'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_59 = _legal_source_WIRE_18 ? 6'h26 : 6'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_60 = {_legal_source_WIRE_19, 5'h0}; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_61 = _legal_source_WIRE_20 ? 6'h22 : 6'h0; // @[Mux.scala:30:73] wire [4:0] _legal_source_T_63 = {_legal_source_T_62[4:3], _legal_source_T_62[2:0] | _legal_source_T_43}; // @[Mux.scala:30:73] wire [4:0] _legal_source_T_64 = {_legal_source_T_63[4], _legal_source_T_63[3:0] | _legal_source_T_44}; // @[Mux.scala:30:73] wire [4:0] _legal_source_T_65 = {_legal_source_T_64[4], _legal_source_T_64[3:0] | _legal_source_T_45}; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_66 = {1'h0, _legal_source_T_65} | _legal_source_T_46; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_67 = _legal_source_T_66 | _legal_source_T_47; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_68 = _legal_source_T_67 | _legal_source_T_48; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_69 = _legal_source_T_68 | _legal_source_T_49; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_70 = _legal_source_T_69 | _legal_source_T_50; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_71 = _legal_source_T_70 | _legal_source_T_51; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_72 = _legal_source_T_71 | _legal_source_T_52; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_73 = _legal_source_T_72 | _legal_source_T_53; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_74 = _legal_source_T_73 | _legal_source_T_54; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_75 = _legal_source_T_74 | _legal_source_T_55; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_76 = _legal_source_T_75 | _legal_source_T_56; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_77 = _legal_source_T_76 | _legal_source_T_57; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_78 = _legal_source_T_77 | _legal_source_T_58; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_79 = _legal_source_T_78 | _legal_source_T_59; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_80 = _legal_source_T_79 | _legal_source_T_60; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_81 = _legal_source_T_80 | _legal_source_T_61; // @[Mux.scala:30:73] wire [5:0] _legal_source_WIRE_1_0 = _legal_source_T_81; // @[Mux.scala:30:73] wire legal_source = _legal_source_WIRE_1_0 == io_in_b_bits_source_0; // @[Mux.scala:30:73] wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_49 = _uncommonBits_T_49[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // 
@[Parameters.scala:52:{29,56}] wire _source_ok_T_120 = io_in_c_bits_source_0 == 6'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_0 = _source_ok_T_120; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_121 = io_in_c_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_127 = io_in_c_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_133 = io_in_c_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_139 = io_in_c_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire _source_ok_T_122 = _source_ok_T_121 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_124 = _source_ok_T_122; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_126 = _source_ok_T_124; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2_1 = _source_ok_T_126; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_128 = _source_ok_T_127 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_130 = _source_ok_T_128; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_132 = _source_ok_T_130; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2_2 = _source_ok_T_132; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_134 = _source_ok_T_133 == 4'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_136 = _source_ok_T_134; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_138 = _source_ok_T_136; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2_3 = _source_ok_T_138; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_140 = _source_ok_T_139 == 4'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_142 = _source_ok_T_140; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_144 = _source_ok_T_142; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2_4 = _source_ok_T_144; // @[Parameters.scala:1138:31] wire _source_ok_T_145 = io_in_c_bits_source_0 == 6'h3C; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_5 = _source_ok_T_145; // @[Parameters.scala:1138:31] wire _source_ok_T_146 = io_in_c_bits_source_0 == 6'h3E; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_6 = _source_ok_T_146; // @[Parameters.scala:1138:31] wire _source_ok_T_147 = io_in_c_bits_source_0 == 6'h38; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_7 = _source_ok_T_147; // @[Parameters.scala:1138:31] wire _source_ok_T_148 = io_in_c_bits_source_0 == 6'h3A; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_8 = _source_ok_T_148; // @[Parameters.scala:1138:31] wire _source_ok_T_149 = io_in_c_bits_source_0 == 6'h34; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_9 = _source_ok_T_149; // @[Parameters.scala:1138:31] wire _source_ok_T_150 = io_in_c_bits_source_0 == 6'h36; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_10 = _source_ok_T_150; // @[Parameters.scala:1138:31] wire _source_ok_T_151 = io_in_c_bits_source_0 == 6'h30; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_11 = _source_ok_T_151; // @[Parameters.scala:1138:31] wire _source_ok_T_152 = io_in_c_bits_source_0 == 6'h32; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_12 = _source_ok_T_152; // @[Parameters.scala:1138:31] wire _source_ok_T_153 = io_in_c_bits_source_0 == 6'h2C; // @[Monitor.scala:36:7] wire 
_source_ok_WIRE_2_13 = _source_ok_T_153; // @[Parameters.scala:1138:31] wire _source_ok_T_154 = io_in_c_bits_source_0 == 6'h2E; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_14 = _source_ok_T_154; // @[Parameters.scala:1138:31] wire _source_ok_T_155 = io_in_c_bits_source_0 == 6'h28; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_15 = _source_ok_T_155; // @[Parameters.scala:1138:31] wire _source_ok_T_156 = io_in_c_bits_source_0 == 6'h2A; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_16 = _source_ok_T_156; // @[Parameters.scala:1138:31] wire _source_ok_T_157 = io_in_c_bits_source_0 == 6'h24; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_17 = _source_ok_T_157; // @[Parameters.scala:1138:31] wire _source_ok_T_158 = io_in_c_bits_source_0 == 6'h26; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_18 = _source_ok_T_158; // @[Parameters.scala:1138:31] wire _source_ok_T_159 = io_in_c_bits_source_0 == 6'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_19 = _source_ok_T_159; // @[Parameters.scala:1138:31] wire _source_ok_T_160 = io_in_c_bits_source_0 == 6'h22; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_20 = _source_ok_T_160; // @[Parameters.scala:1138:31] wire _source_ok_T_161 = _source_ok_WIRE_2_0 | _source_ok_WIRE_2_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_162 = _source_ok_T_161 | _source_ok_WIRE_2_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_163 = _source_ok_T_162 | _source_ok_WIRE_2_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_164 = _source_ok_T_163 | _source_ok_WIRE_2_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_165 = _source_ok_T_164 | _source_ok_WIRE_2_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_166 = _source_ok_T_165 | _source_ok_WIRE_2_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_167 = _source_ok_T_166 | _source_ok_WIRE_2_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_168 = _source_ok_T_167 | _source_ok_WIRE_2_8; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_169 = _source_ok_T_168 | _source_ok_WIRE_2_9; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_170 = _source_ok_T_169 | _source_ok_WIRE_2_10; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_171 = _source_ok_T_170 | _source_ok_WIRE_2_11; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_172 = _source_ok_T_171 | _source_ok_WIRE_2_12; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_173 = _source_ok_T_172 | _source_ok_WIRE_2_13; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_174 = _source_ok_T_173 | _source_ok_WIRE_2_14; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_175 = _source_ok_T_174 | _source_ok_WIRE_2_15; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_176 = _source_ok_T_175 | _source_ok_WIRE_2_16; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_177 = _source_ok_T_176 | _source_ok_WIRE_2_17; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_178 = _source_ok_T_177 | _source_ok_WIRE_2_18; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_179 = _source_ok_T_178 | _source_ok_WIRE_2_19; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_2 = _source_ok_T_179 | _source_ok_WIRE_2_20; // @[Parameters.scala:1138:31, :1139:46] wire [12:0] _GEN_1 = 13'h3F << io_in_c_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T_4; // @[package.scala:243:71] assign _is_aligned_mask_T_4 = _GEN_1; // @[package.scala:243:71] wire [12:0] _c_first_beats1_decode_T; // 
@[package.scala:243:71] assign _c_first_beats1_decode_T = _GEN_1; // @[package.scala:243:71] wire [12:0] _c_first_beats1_decode_T_3; // @[package.scala:243:71] assign _c_first_beats1_decode_T_3 = _GEN_1; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_5 = _is_aligned_mask_T_4[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask_2 = ~_is_aligned_mask_T_5; // @[package.scala:243:{46,76}] wire [31:0] _is_aligned_T_2 = {26'h0, io_in_c_bits_address_0[5:0] & is_aligned_mask_2}; // @[package.scala:243:46] wire is_aligned_2 = _is_aligned_T_2 == 32'h0; // @[Edges.scala:21:{16,24}] wire [27:0] _GEN_2 = io_in_c_bits_address_0[27:0] ^ 28'h8000000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_10 = {io_in_c_bits_address_0[31:28], _GEN_2}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_11 = {1'h0, _address_ok_T_10}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_12 = _address_ok_T_11 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_13 = _address_ok_T_12; // @[Parameters.scala:137:46] wire _address_ok_T_14 = _address_ok_T_13 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_0 = _address_ok_T_14; // @[Parameters.scala:612:40] wire [31:0] _address_ok_T_15 = io_in_c_bits_address_0 ^ 32'h80000000; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_16 = {1'h0, _address_ok_T_15}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_17 = _address_ok_T_16 & 33'h1F0000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_18 = _address_ok_T_17; // @[Parameters.scala:137:46] wire _address_ok_T_19 = _address_ok_T_18 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_1 = _address_ok_T_19; // @[Parameters.scala:612:40] wire address_ok_1 = _address_ok_WIRE_1_0 | _address_ok_WIRE_1_1; // @[Parameters.scala:612:40, :636:64] wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_54 = _uncommonBits_T_54[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_55 = _uncommonBits_T_55[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_56 = _uncommonBits_T_56[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_57 = _uncommonBits_T_57[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_58 = _uncommonBits_T_58[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_59 = _uncommonBits_T_59[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_60 = _uncommonBits_T_60[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_61 = _uncommonBits_T_61[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_62 = _uncommonBits_T_62[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_63 = _uncommonBits_T_63[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_64 = _uncommonBits_T_64[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_65 = _uncommonBits_T_65[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_66 = _uncommonBits_T_66[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_67 = _uncommonBits_T_67[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_68 = _uncommonBits_T_68[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_69 = _uncommonBits_T_69[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_70 = _uncommonBits_T_70[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_71 = 
_uncommonBits_T_71[1:0]; // @[Parameters.scala:52:{29,56}] wire sink_ok_1 = io_in_e_bits_sink_0 != 3'h7; // @[Monitor.scala:36:7, :367:31] wire _T_2916 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_2916; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_2916; // @[Decoupled.scala:51:35] wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T = a_first ? 
a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [5:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] wire _T_2990 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_2990; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_2990; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_2990; // @[Decoupled.scala:51:35] wire _d_first_T_3; // @[Decoupled.scala:51:35] assign _d_first_T_3 = _T_2990; // @[Decoupled.scala:51:35] wire [12:0] _GEN_3 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_3; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_3; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_3; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_9; // @[package.scala:243:71] assign _d_first_beats1_decode_T_9 = _GEN_3; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_3 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T = d_first ? 
d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [5:0] source_1; // @[Monitor.scala:541:22] reg [2:0] sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] wire _b_first_T = io_in_b_ready_0 & io_in_b_valid_0; // @[Decoupled.scala:51:35] wire b_first_done = _b_first_T; // @[Decoupled.scala:51:35] reg [2:0] b_first_counter; // @[Edges.scala:229:27] wire [3:0] _b_first_counter1_T = {1'h0, b_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] b_first_counter1 = _b_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire b_first = b_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _b_first_last_T = b_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire [2:0] _b_first_count_T = ~b_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] _b_first_counter_T = b_first ? 3'h0 : b_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [1:0] param_2; // @[Monitor.scala:411:22] reg [5:0] source_2; // @[Monitor.scala:413:22] reg [31:0] address_1; // @[Monitor.scala:414:22] wire _T_2987 = io_in_c_ready_0 & io_in_c_valid_0; // @[Decoupled.scala:51:35] wire _c_first_T; // @[Decoupled.scala:51:35] assign _c_first_T = _T_2987; // @[Decoupled.scala:51:35] wire _c_first_T_1; // @[Decoupled.scala:51:35] assign _c_first_T_1 = _T_2987; // @[Decoupled.scala:51:35] wire [5:0] _c_first_beats1_decode_T_1 = _c_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _c_first_beats1_decode_T_2 = ~_c_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] c_first_beats1_decode = _c_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire c_first_beats1_opdata = io_in_c_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire c_first_beats1_opdata_1 = io_in_c_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [2:0] c_first_beats1 = c_first_beats1_opdata ? c_first_beats1_decode : 3'h0; // @[Edges.scala:102:36, :220:59, :221:14] reg [2:0] c_first_counter; // @[Edges.scala:229:27] wire [3:0] _c_first_counter1_T = {1'h0, c_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] c_first_counter1 = _c_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire c_first = c_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _c_first_last_T = c_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _c_first_last_T_1 = c_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire c_first_last = _c_first_last_T | _c_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire c_first_done = c_first_last & _c_first_T; // @[Decoupled.scala:51:35] wire [2:0] _c_first_count_T = ~c_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] c_first_count = c_first_beats1 & _c_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _c_first_counter_T = c_first ? 
c_first_beats1 : c_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_3; // @[Monitor.scala:515:22] reg [2:0] param_3; // @[Monitor.scala:516:22] reg [2:0] size_3; // @[Monitor.scala:517:22] reg [5:0] source_3; // @[Monitor.scala:518:22] reg [31:0] address_2; // @[Monitor.scala:519:22] reg [62:0] inflight; // @[Monitor.scala:614:27] reg [251:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [251:0] inflight_sizes; // @[Monitor.scala:618:33] wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_1 = d_first_1 ? 
d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [62:0] a_set; // @[Monitor.scala:626:34] wire [62:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [251:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [251:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [8:0] _GEN_4 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [8:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_4; // @[Monitor.scala:637:69] wire [8:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_4; // @[Monitor.scala:637:69, :641:65] wire [8:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_4; // @[Monitor.scala:637:69, :680:101] wire [8:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_4; // @[Monitor.scala:637:69, :681:99] wire [8:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_4; // @[Monitor.scala:637:69, :749:69] wire [8:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_4; // @[Monitor.scala:637:69, :750:67] wire [8:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_4; // @[Monitor.scala:637:69, :790:101] wire [8:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_4; // @[Monitor.scala:637:69, :791:99] wire [251:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [251:0] _a_opcode_lookup_T_6 = {248'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [251:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[251:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [251:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [251:0] _a_size_lookup_T_6 = {248'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [251:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[251:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [63:0] _GEN_5 = 64'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [63:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [63:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_5; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[62:0] : 63'h0; // @[OneHot.scala:58:35] wire _T_2842 = _T_2916 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_2842 ? _a_set_T[62:0] : 63'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_2842 ? 
_a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_2842 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [8:0] _GEN_6 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [8:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_6; // @[Monitor.scala:659:79] wire [8:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_6; // @[Monitor.scala:659:79, :660:77] wire [514:0] _a_opcodes_set_T_1 = {511'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_2842 ? _a_opcodes_set_T_1[251:0] : 252'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [514:0] _a_sizes_set_T_1 = {511'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_2842 ? _a_sizes_set_T_1[251:0] : 252'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [62:0] d_clr; // @[Monitor.scala:664:34] wire [62:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [251:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [251:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_7 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_7; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_7; // @[Monitor.scala:673:46, :783:46] wire _T_2888 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [63:0] _GEN_8 = 64'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [63:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_8; // @[OneHot.scala:58:35] wire [63:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_8; // @[OneHot.scala:58:35] wire [63:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_8; // @[OneHot.scala:58:35] wire [63:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_8; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_2888 & ~d_release_ack ? _d_clr_wo_ready_T[62:0] : 63'h0; // @[OneHot.scala:58:35] wire _T_2857 = _T_2990 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_2857 ? _d_clr_T[62:0] : 63'h0; // @[OneHot.scala:58:35] wire [526:0] _d_opcodes_clr_T_5 = 527'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_2857 ? _d_opcodes_clr_T_5[251:0] : 252'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [526:0] _d_sizes_clr_T_5 = 527'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_2857 ? 
_d_sizes_clr_T_5[251:0] : 252'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [62:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [62:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [62:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [251:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [251:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [251:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [251:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [251:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [251:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [62:0] inflight_1; // @[Monitor.scala:726:35] reg [251:0] inflight_opcodes_1; // @[Monitor.scala:727:35] reg [251:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [5:0] _c_first_beats1_decode_T_4 = _c_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _c_first_beats1_decode_T_5 = ~_c_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] c_first_beats1_decode_1 = _c_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire [2:0] c_first_beats1_1 = c_first_beats1_opdata_1 ? c_first_beats1_decode_1 : 3'h0; // @[Edges.scala:102:36, :220:59, :221:14] reg [2:0] c_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _c_first_counter1_T_1 = {1'h0, c_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] c_first_counter1_1 = _c_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire c_first_1 = c_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _c_first_last_T_2 = c_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _c_first_last_T_3 = c_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire c_first_last_1 = _c_first_last_T_2 | _c_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire c_first_done_1 = c_first_last_1 & _c_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _c_first_count_T_1 = ~c_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] c_first_count_1 = c_first_beats1_1 & _c_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _c_first_counter_T_1 = c_first_1 ? c_first_beats1_1 : c_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? 
d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [62:0] c_set; // @[Monitor.scala:738:34] wire [62:0] c_set_wo_ready; // @[Monitor.scala:739:34] wire [251:0] c_opcodes_set; // @[Monitor.scala:740:34] wire [251:0] c_sizes_set; // @[Monitor.scala:741:34] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [251:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [251:0] _c_opcode_lookup_T_6 = {248'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [251:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[251:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [251:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [251:0] _c_size_lookup_T_6 = {248'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [251:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[251:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [3:0] c_opcodes_set_interm; // @[Monitor.scala:754:40] wire [3:0] c_sizes_set_interm; // @[Monitor.scala:755:40] wire _same_cycle_resp_T_3 = io_in_c_valid_0 & c_first_1; // @[Monitor.scala:36:7, :759:26, :795:44] wire _same_cycle_resp_T_4 = io_in_c_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _same_cycle_resp_T_5 = io_in_c_bits_opcode_0[1]; // @[Monitor.scala:36:7] wire [63:0] _GEN_9 = 64'h1 << io_in_c_bits_source_0; // @[OneHot.scala:58:35] wire [63:0] _c_set_wo_ready_T; // @[OneHot.scala:58:35] assign _c_set_wo_ready_T = _GEN_9; // @[OneHot.scala:58:35] wire [63:0] _c_set_T; // @[OneHot.scala:58:35] assign _c_set_T = _GEN_9; // @[OneHot.scala:58:35] assign c_set_wo_ready = _same_cycle_resp_T_3 & _same_cycle_resp_T_4 & _same_cycle_resp_T_5 ? _c_set_wo_ready_T[62:0] : 63'h0; // @[OneHot.scala:58:35] wire _T_2929 = _T_2987 & c_first_1 & _same_cycle_resp_T_4 & _same_cycle_resp_T_5; // @[Decoupled.scala:51:35] assign c_set = _T_2929 ? _c_set_T[62:0] : 63'h0; // @[OneHot.scala:58:35] wire [3:0] _c_opcodes_set_interm_T = {io_in_c_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :765:53] wire [3:0] _c_opcodes_set_interm_T_1 = {_c_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:765:{53,61}] assign c_opcodes_set_interm = _T_2929 ? 
_c_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:754:40, :763:{25,36,70}, :765:{28,61}] wire [3:0] _c_sizes_set_interm_T = {io_in_c_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :766:51] wire [3:0] _c_sizes_set_interm_T_1 = {_c_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:766:{51,59}] assign c_sizes_set_interm = _T_2929 ? _c_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:755:40, :763:{25,36,70}, :766:{28,59}] wire [8:0] _GEN_10 = {1'h0, io_in_c_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :767:79] wire [8:0] _c_opcodes_set_T; // @[Monitor.scala:767:79] assign _c_opcodes_set_T = _GEN_10; // @[Monitor.scala:767:79] wire [8:0] _c_sizes_set_T; // @[Monitor.scala:768:77] assign _c_sizes_set_T = _GEN_10; // @[Monitor.scala:767:79, :768:77] wire [514:0] _c_opcodes_set_T_1 = {511'h0, c_opcodes_set_interm} << _c_opcodes_set_T; // @[Monitor.scala:659:54, :754:40, :767:{54,79}] assign c_opcodes_set = _T_2929 ? _c_opcodes_set_T_1[251:0] : 252'h0; // @[Monitor.scala:740:34, :763:{25,36,70}, :767:{28,54}] wire [514:0] _c_sizes_set_T_1 = {511'h0, c_sizes_set_interm} << _c_sizes_set_T; // @[Monitor.scala:659:54, :755:40, :768:{52,77}] assign c_sizes_set = _T_2929 ? _c_sizes_set_T_1[251:0] : 252'h0; // @[Monitor.scala:741:34, :763:{25,36,70}, :768:{28,52}] wire _c_probe_ack_T = io_in_c_bits_opcode_0 == 3'h4; // @[Monitor.scala:36:7, :772:47] wire _c_probe_ack_T_1 = io_in_c_bits_opcode_0 == 3'h5; // @[Monitor.scala:36:7, :772:95] wire c_probe_ack = _c_probe_ack_T | _c_probe_ack_T_1; // @[Monitor.scala:772:{47,71,95}] wire [62:0] d_clr_1; // @[Monitor.scala:774:34] wire [62:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [251:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [251:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_2960 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_2960 & d_release_ack_1 ? _d_clr_wo_ready_T_1[62:0] : 63'h0; // @[OneHot.scala:58:35] wire _T_2942 = _T_2990 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_2942 ? _d_clr_T_1[62:0] : 63'h0; // @[OneHot.scala:58:35] wire [526:0] _d_opcodes_clr_T_11 = 527'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_2942 ? _d_opcodes_clr_T_11[251:0] : 252'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [526:0] _d_sizes_clr_T_11 = 527'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_2942 ? 
_d_sizes_clr_T_11[251:0] : 252'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_6 = _same_cycle_resp_T_4 & _same_cycle_resp_T_5; // @[Edges.scala:68:{36,40,51}] wire _same_cycle_resp_T_7 = _same_cycle_resp_T_3 & _same_cycle_resp_T_6; // @[Monitor.scala:795:{44,55}] wire _same_cycle_resp_T_8 = io_in_c_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :795:113] wire same_cycle_resp_1 = _same_cycle_resp_T_7 & _same_cycle_resp_T_8; // @[Monitor.scala:795:{55,88,113}] wire [62:0] _inflight_T_3 = inflight_1 | c_set; // @[Monitor.scala:726:35, :738:34, :814:35] wire [62:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [62:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [251:0] _inflight_opcodes_T_3 = inflight_opcodes_1 | c_opcodes_set; // @[Monitor.scala:727:35, :740:34, :815:43] wire [251:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [251:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [251:0] _inflight_sizes_T_3 = inflight_sizes_1 | c_sizes_set; // @[Monitor.scala:728:35, :741:34, :816:41] wire [251:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [251:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27] wire [32:0] _watchdog_T_2 = {1'h0, watchdog_1} + 33'h1; // @[Monitor.scala:818:27, :823:26] wire [31:0] _watchdog_T_3 = _watchdog_T_2[31:0]; // @[Monitor.scala:823:26] reg [6:0] inflight_2; // @[Monitor.scala:828:27] wire [5:0] _d_first_beats1_decode_T_10 = _d_first_beats1_decode_T_9[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_11 = ~_d_first_beats1_decode_T_10; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_3 = _d_first_beats1_decode_T_11[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_3 = d_first_beats1_opdata_3 ? d_first_beats1_decode_3 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_3; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_3 = {1'h0, d_first_counter_3} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_3 = _d_first_counter1_T_3[2:0]; // @[Edges.scala:230:28] wire d_first_3 = d_first_counter_3 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_6 = d_first_counter_3 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_7 = d_first_beats1_3 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_3 = _d_first_last_T_6 | _d_first_last_T_7; // @[Edges.scala:232:{25,33,43}] wire d_first_done_3 = d_first_last_3 & _d_first_T_3; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_3 = ~d_first_counter1_3; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_3 = d_first_beats1_3 & _d_first_count_T_3; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_3 = d_first_3 ? d_first_beats1_3 : d_first_counter1_3; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [6:0] d_set; // @[Monitor.scala:833:25] wire _T_2996 = _T_2990 & d_first_3 & io_in_d_bits_opcode_0[2] & ~(io_in_d_bits_opcode_0[1]); // @[Decoupled.scala:51:35] wire [7:0] _d_set_T = 8'h1 << io_in_d_bits_sink_0; // @[OneHot.scala:58:35] assign d_set = _T_2996 ? 
_d_set_T[6:0] : 7'h0; // @[OneHot.scala:58:35] wire [6:0] e_clr; // @[Monitor.scala:839:25] wire [7:0] _e_clr_T = 8'h1 << io_in_e_bits_sink_0; // @[OneHot.scala:58:35] assign e_clr = io_in_e_valid_0 ? _e_clr_T[6:0] : 7'h0; // @[OneHot.scala:58:35]
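The d_set and e_clr assigns above feed the monitor's per-sink scoreboard (the inflight_2 register): a sink bit is set when the first beat of a Grant/GrantData is accepted on channel D and cleared when the matching GrantAck arrives on channel E. A minimal Chisel sketch of that one-hot set/clear pattern, assuming the usual inflight := (inflight | d_set) & ~e_clr update; the module and signal names (SinkTracker, d_fire_grant, ...) are illustrative, not the generated code:

import chisel3._
import chisel3.util._

class SinkTracker(nSinks: Int) extends Module {
  val io = IO(new Bundle {
    val d_fire_grant = Input(Bool())                   // first beat of Grant/GrantData accepted on D
    val d_sink       = Input(UInt(log2Ceil(nSinks).W))
    val e_fire       = Input(Bool())                   // GrantAck accepted on E
    val e_sink       = Input(UInt(log2Ceil(nSinks).W))
    val inflight     = Output(UInt(nSinks.W))
  })
  val inflight = RegInit(0.U(nSinks.W))
  val d_set = Mux(io.d_fire_grant, UIntToOH(io.d_sink, nSinks), 0.U) // like d_set above
  val e_clr = Mux(io.e_fire,       UIntToOH(io.e_sink, nSinks), 0.U) // like e_clr above
  inflight := (inflight | d_set) & ~e_clr
  io.inflight := inflight
}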
Generate the Verilog code corresponding to the following Chisel files. File InputUnit.scala: package constellation.router import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.util._ import constellation.channel._ import constellation.routing.{FlowRoutingBundle} import constellation.noc.{HasNoCParams} class AbstractInputUnitIO( val cParam: BaseChannelParams, val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams], )(implicit val p: Parameters) extends Bundle with HasRouterOutputParams { val nodeId = cParam.destId val router_req = Decoupled(new RouteComputerReq) val router_resp = Input(new RouteComputerResp(outParams, egressParams)) val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams)) val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams)) val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })) val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams))) val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams))) val debug = Output(new Bundle { val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W) val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W) }) val block = Input(Bool()) } abstract class AbstractInputUnit( val cParam: BaseChannelParams, val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams] )(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams { val nodeId = cParam.destId def io: AbstractInputUnitIO } class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module { val nVirtualChannels = cParam.nVirtualChannels val io = IO(new Bundle { val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits)))) val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits))) }) val useOutputQueues = cParam.useOutputQueues val delims = if (useOutputQueues) { cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_) } else { // If no queuing, have to add an additional slot since head == tail implies empty // TODO this should be fixed, should use all slots available cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_) } val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) => if (cParam.virtualChannelParams(i).traversable) s else 0 } val ends = delims.tail.zipWithIndex.map { case (s,i) => if (cParam.virtualChannelParams(i).traversable) s else 0 } val fullSize = delims.last // Ugly case. 
Use multiple queues if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) { require(useOutputQueues) val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize))) qs.zipWithIndex.foreach { case (q,i) => val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U) q.io.enq.valid := sel.orR q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head)) q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail)) q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload)) io.deq(i) <> q.io.deq } } else { val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits)) val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W)))) val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W)))) val empty = (heads zip tails).map(t => t._1 === t._2) val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) } qs.foreach(_.io.enq.valid := false.B) qs.foreach(_.io.enq.bits := DontCare) val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id) val flit = Wire(new BaseFlit(cParam.payloadBits)) val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B flit.head := io.enq(0).bits.head flit.tail := io.enq(0).bits.tail flit.payload := io.enq(0).bits.payload when (io.enq(0).valid && !direct_to_q) { val tail = tails(io.enq(0).bits.virt_channel_id) mem.write(tail, flit) tails(io.enq(0).bits.virt_channel_id) := Mux( tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(vc_sel, starts.map(_.U)), tail + 1.U) } .elsewhen (io.enq(0).valid && direct_to_q) { for (i <- 0 until nVirtualChannels) { when (io.enq(0).bits.virt_channel_id === i.U) { qs(i).io.enq.valid := true.B qs(i).io.enq.bits := flit } } } if (useOutputQueues) { val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready } val to_q_oh = PriorityEncoderOH(can_to_q) val to_q = OHToUInt(to_q_oh) when (can_to_q.orR) { val head = Mux1H(to_q_oh, heads) heads(to_q) := Mux( head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(to_q_oh, starts.map(_.U)), head + 1.U) for (i <- 0 until nVirtualChannels) { when (to_q_oh(i)) { qs(i).io.enq.valid := true.B qs(i).io.enq.bits := mem.read(head) } } } for (i <- 0 until nVirtualChannels) { io.deq(i) <> qs(i).io.deq } } else { qs.map(_.io.deq.ready := false.B) val ready_sel = io.deq.map(_.ready) val fire = io.deq.map(_.fire) assert(PopCount(fire) <= 1.U) val head = Mux1H(fire, heads) when (fire.orR) { val fire_idx = OHToUInt(fire) heads(fire_idx) := Mux( head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(fire, starts.map(_.U)), head + 1.U) } val read_flit = mem.read(head) for (i <- 0 until nVirtualChannels) { io.deq(i).valid := !empty(i) io.deq(i).bits := read_flit } } } } class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams], egressParams: Seq[EgressChannelParams], combineRCVA: Boolean, combineSAST: Boolean ) (implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) { val nVirtualChannels = cParam.nVirtualChannels val virtualChannelParams = cParam.virtualChannelParams class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) { val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams])) } val io = IO(new InputUnitIO) val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5) class InputState extends Bundle { val g = UInt(3.W) val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) 
}) val flow = new FlowRoutingBundle val fifo_deps = UInt(nVirtualChannels.W) } val input_buffer = Module(new InputBuffer(cParam)) for (i <- 0 until cParam.srcSpeedup) { input_buffer.io.enq(i) := io.in.flit(i) } input_buffer.io.deq.foreach(_.ready := false.B) val route_arbiter = Module(new Arbiter( new RouteComputerReq, nVirtualChannels )) io.router_req <> route_arbiter.io.out val states = Reg(Vec(nVirtualChannels, new InputState)) val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_) val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_) if (anyFifo) { val idle_mask = VecInit(states.map(_.g === g_i)).asUInt for (s <- states) for (i <- 0 until nVirtualChannels) s.fifo_deps := s.fifo_deps & ~idle_mask } for (i <- 0 until cParam.srcSpeedup) { when (io.in.flit(i).fire && io.in.flit(i).bits.head) { val id = io.in.flit(i).bits.virt_channel_id assert(id < nVirtualChannels.U) assert(states(id).g === g_i) val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U states(id).g := Mux(at_dest, g_v, g_r) states(id).vc_sel.foreach(_.foreach(_ := false.B)) for (o <- 0 until nEgress) { when (o.U === io.in.flit(i).bits.flow.egress_node_id) { states(id).vc_sel(o+nOutputs)(0) := true.B } } states(id).flow := io.in.flit(i).bits.flow if (anyFifo) { val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) => s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id }).asUInt } } } (route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) => if (virtualChannelParams(idx).traversable) { i.valid := s.g === g_r i.bits.flow := s.flow i.bits.src_virt_id := idx.U when (i.fire) { s.g := g_v } } else { i.valid := false.B i.bits := DontCare } } when (io.router_req.fire) { val id = io.router_req.bits.src_virt_id assert(states(id).g === g_r) states(id).g := g_v for (i <- 0 until nVirtualChannels) { when (i.U === id) { states(i).vc_sel := io.router_resp.vc_sel } } } val mask = RegInit(0.U(nVirtualChannels.W)) val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams))) val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool())) val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask)) val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels) // Prioritize incoming packetes when (io.router_req.fire) { mask := (1.U << io.router_req.bits.src_virt_id) - 1.U } .elsewhen (vcalloc_vals.orR) { mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) }) } io.vcalloc_req.valid := vcalloc_vals.orR io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs) states.zipWithIndex.map { case (s,idx) => if (virtualChannelParams(idx).traversable) { vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U vcalloc_reqs(idx).in_vc := idx.U vcalloc_reqs(idx).vc_sel := s.vc_sel vcalloc_reqs(idx).flow := s.flow when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a } if (combineRCVA) { when (route_arbiter.io.in(idx).fire) { vcalloc_vals(idx) := true.B vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel } } } else { vcalloc_vals(idx) := false.B vcalloc_reqs(idx) := DontCare } } io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready when (io.vcalloc_req.fire) { for (i <- 0 until nVirtualChannels) { when (vcalloc_sel(i)) { states(i).vc_sel := io.vcalloc_resp.vc_sel states(i).g := g_a if (!combineRCVA) { assert(states(i).g === g_v) } } } } val 
salloc_arb = Module(new SwitchArbiter( nVirtualChannels, cParam.destSpeedup, outParams, egressParams )) (states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) => if (virtualChannelParams(i).traversable) { val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid r.bits.vc_sel := s.vc_sel val deq_tail = input_buffer.io.deq(i).bits.tail r.bits.tail := deq_tail when (r.fire && deq_tail) { s.g := g_i } input_buffer.io.deq(i).ready := r.ready } else { r.valid := false.B r.bits := DontCare } } io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready)) io.salloc_req <> salloc_arb.io.out when (io.block) { salloc_arb.io.out.foreach(_.ready := false.B) io.salloc_req.foreach(_.valid := false.B) } class OutBundle extends Bundle { val valid = Bool() val vid = UInt(virtualChannelBits.W) val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W) val flit = new Flit(cParam.payloadBits) } val salloc_outs = if (combineSAST) { Wire(Vec(cParam.destSpeedup, new OutBundle)) } else { Reg(Vec(cParam.destSpeedup, new OutBundle)) } io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) => Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U) }.reduce(_|_) io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) => Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)), salloc_arb.io.chosen_oh(i), 0.U) }.reduce(_|_) for (i <- 0 until cParam.destSpeedup) { val salloc_out = salloc_outs(i) salloc_out.valid := salloc_arb.io.out(i).fire salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i)) val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel)) val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq) when (salloc_arb.io.out(i).fire) { salloc_out.out_vid := virt_channel salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload)) salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head)) salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)) salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow)) } .otherwise { salloc_out.out_vid := DontCare salloc_out.flit := DontCare } salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch io.out(i).valid := salloc_out.valid io.out(i).bits.flit := salloc_out.flit io.out(i).bits.out_virt_channel := salloc_out.out_vid } def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = { if (virtualChannelParams(srcV).traversable) { outParams.zipWithIndex.map { case (oP, oI) => (0 until oP.nVirtualChannels).map { oV => var allow = false virtualChannelParams(srcV).possibleFlows.foreach { pI => allow = allow || routingRelation( cParam.channelRoutingInfos(srcV), oP.channelRoutingInfos(oV), pI ) } if (!allow) sel(oI)(oV) := false.B } } } } (0 until nVirtualChannels).map { i => if (!virtualChannelParams(i).traversable) states(i) := DontCare filterVCSel(states(i).vc_sel, i) } when (reset.asBool) { states.foreach(_.g := g_i) } }
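A minimal Chisel sketch of the per-virtual-channel state progression that the InputUnit above implements (g_i -> g_r -> g_v -> g_a, and back to g_i once the tail flit wins switch allocation); the trigger names headFire, atDest, routeDone, vcGranted, and tailSent are illustrative stand-ins for the module's internal fire conditions:

import chisel3._
import chisel3.util._

class VCStateSketch extends Module {
  val io = IO(new Bundle {
    val headFire  = Input(Bool()) // head flit arrives for this VC
    val atDest    = Input(Bool()) // flow already terminates at this node, so route compute is skipped
    val routeDone = Input(Bool()) // route computer request fired
    val vcGranted = Input(Bool()) // VC allocator granted an output VC
    val tailSent  = Input(Bool()) // switch allocation fired on the tail flit
    val state     = Output(UInt(3.W))
  })
  val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5) // same encoding as InputUnit
  val g = RegInit(g_i)
  when (io.headFire)               { g := Mux(io.atDest, g_v, g_r) }
  when (g === g_r && io.routeDone) { g := g_v }
  when (g === g_v && io.vcGranted) { g := g_a }
  when (g === g_a && io.tailSent)  { g := g_i }
  io.state := g
}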
module InputUnit_117( // @[InputUnit.scala:158:7] input clock, // @[InputUnit.scala:158:7] input reset, // @[InputUnit.scala:158:7] output [3:0] io_router_req_bits_src_virt_id, // @[InputUnit.scala:170:14] output [2:0] io_router_req_bits_flow_vnet_id, // @[InputUnit.scala:170:14] output [3:0] io_router_req_bits_flow_ingress_node, // @[InputUnit.scala:170:14] output [1:0] io_router_req_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14] output [3:0] io_router_req_bits_flow_egress_node, // @[InputUnit.scala:170:14] output [2:0] io_router_req_bits_flow_egress_node_id, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_9, // @[InputUnit.scala:170:14] input io_vcalloc_req_ready, // @[InputUnit.scala:170:14] output io_vcalloc_req_valid, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_9, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_1_9, // @[InputUnit.scala:170:14] input io_out_credit_available_2_2, // @[InputUnit.scala:170:14] input io_out_credit_available_2_3, // @[InputUnit.scala:170:14] input io_out_credit_available_2_4, // @[InputUnit.scala:170:14] input io_out_credit_available_2_5, // @[InputUnit.scala:170:14] input io_out_credit_available_2_6, // @[InputUnit.scala:170:14] input io_out_credit_available_2_7, // @[InputUnit.scala:170:14] input io_out_credit_available_2_8, // @[InputUnit.scala:170:14] input io_out_credit_available_2_9, // @[InputUnit.scala:170:14] input io_out_credit_available_1_8, // @[InputUnit.scala:170:14] input io_out_credit_available_1_9, // @[InputUnit.scala:170:14] input io_out_credit_available_0_2, // @[InputUnit.scala:170:14] input io_out_credit_available_0_3, // @[InputUnit.scala:170:14] input io_out_credit_available_0_4, // @[InputUnit.scala:170:14] input io_out_credit_available_0_5, // @[InputUnit.scala:170:14] input io_out_credit_available_0_6, // @[InputUnit.scala:170:14] input io_out_credit_available_0_7, // @[InputUnit.scala:170:14] input io_out_credit_available_0_8, // @[InputUnit.scala:170:14] input io_out_credit_available_0_9, // @[InputUnit.scala:170:14] input io_salloc_req_0_ready, // @[InputUnit.scala:170:14] output io_salloc_req_0_valid, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_3, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_4, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_5, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_6, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_7, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_8, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_9, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_3, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_4, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_5, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_6, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_7, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_8, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_9, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_3, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_4, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_5, // 
@[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_6, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_7, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_8, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_9, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14] output io_out_0_valid, // @[InputUnit.scala:170:14] output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14] output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14] output [72:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14] output [2:0] io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14] output [3:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14] output [1:0] io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14] output [3:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14] output [2:0] io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14] output [3:0] io_out_0_bits_out_virt_channel, // @[InputUnit.scala:170:14] output [3:0] io_debug_va_stall, // @[InputUnit.scala:170:14] output [3:0] io_debug_sa_stall, // @[InputUnit.scala:170:14] input io_in_flit_0_valid, // @[InputUnit.scala:170:14] input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14] input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14] input [72:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14] input [2:0] io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14] input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14] input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14] input [3:0] io_in_flit_0_bits_flow_egress_node, // @[InputUnit.scala:170:14] input [2:0] io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14] input [3:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14] output [9:0] io_in_credit_return, // @[InputUnit.scala:170:14] output [9:0] io_in_vc_free // @[InputUnit.scala:170:14] ); wire vcalloc_vals_9; // @[InputUnit.scala:266:32] wire vcalloc_vals_8; // @[InputUnit.scala:266:32] wire _salloc_arb_io_in_8_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_9_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26] wire [9:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26] wire _route_arbiter_io_in_8_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_9_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29] wire [3:0] _route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29] wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_3_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_3_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_3_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_4_bits_head; // 
@[InputUnit.scala:181:28] wire _input_buffer_io_deq_4_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_4_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_5_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_5_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_5_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_6_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_6_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_6_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_7_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_7_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_7_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_8_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_8_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_8_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_8_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_9_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_9_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_9_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_9_bits_payload; // @[InputUnit.scala:181:28] reg [2:0] states_8_g; // @[InputUnit.scala:192:19] reg states_8_vc_sel_1_9; // @[InputUnit.scala:192:19] reg [2:0] states_8_flow_vnet_id; // @[InputUnit.scala:192:19] reg [3:0] states_8_flow_ingress_node; // @[InputUnit.scala:192:19] reg [1:0] states_8_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [3:0] states_8_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_8_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_9_g; // @[InputUnit.scala:192:19] reg states_9_vc_sel_1_9; // @[InputUnit.scala:192:19] reg [2:0] states_9_flow_vnet_id; // @[InputUnit.scala:192:19] reg [3:0] states_9_flow_ingress_node; // @[InputUnit.scala:192:19] reg [1:0] states_9_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [3:0] states_9_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_9_flow_egress_node_id; // @[InputUnit.scala:192:19] wire _GEN = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30] wire route_arbiter_io_in_8_valid = states_8_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_9_valid = states_9_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] reg [9:0] mask; // @[InputUnit.scala:250:21] wire [9:0] _vcalloc_filter_T_3 = {vcalloc_vals_9, vcalloc_vals_8, 8'h0} & ~mask; // @[InputUnit.scala:250:21, :253:{80,87,89}, :266:32] wire [19:0] vcalloc_filter = _vcalloc_filter_T_3[0] ? 20'h1 : _vcalloc_filter_T_3[1] ? 20'h2 : _vcalloc_filter_T_3[2] ? 20'h4 : _vcalloc_filter_T_3[3] ? 20'h8 : _vcalloc_filter_T_3[4] ? 20'h10 : _vcalloc_filter_T_3[5] ? 20'h20 : _vcalloc_filter_T_3[6] ? 20'h40 : _vcalloc_filter_T_3[7] ? 20'h80 : _vcalloc_filter_T_3[8] ? 20'h100 : _vcalloc_filter_T_3[9] ? 20'h200 : vcalloc_vals_8 ? 
20'h40000 : {vcalloc_vals_9, 19'h0}; // @[OneHot.scala:85:71] wire [9:0] vcalloc_sel = vcalloc_filter[9:0] | vcalloc_filter[19:10]; // @[Mux.scala:50:70] wire io_vcalloc_req_valid_0 = vcalloc_vals_8 | vcalloc_vals_9; // @[package.scala:81:59] assign vcalloc_vals_8 = states_8_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_9 = states_9_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] wire _GEN_0 = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35] wire _GEN_1 = _GEN_0 & vcalloc_sel[8]; // @[Mux.scala:32:36] wire _GEN_2 = _GEN_0 & vcalloc_sel[9]; // @[Mux.scala:32:36]
Generate the Verilog code corresponding to the following Chisel files. File CompareRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ class CompareRecFN(expWidth: Int, sigWidth: Int) extends RawModule { val io = IO(new Bundle { val a = Input(Bits((expWidth + sigWidth + 1).W)) val b = Input(Bits((expWidth + sigWidth + 1).W)) val signaling = Input(Bool()) val lt = Output(Bool()) val eq = Output(Bool()) val gt = Output(Bool()) val exceptionFlags = Output(Bits(5.W)) }) val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a) val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b) val ordered = ! rawA.isNaN && ! rawB.isNaN val bothInfs = rawA.isInf && rawB.isInf val bothZeros = rawA.isZero && rawB.isZero val eqExps = (rawA.sExp === rawB.sExp) val common_ltMags = (rawA.sExp < rawB.sExp) || (eqExps && (rawA.sig < rawB.sig)) val common_eqMags = eqExps && (rawA.sig === rawB.sig) val ordered_lt = ! bothZeros && ((rawA.sign && ! rawB.sign) || (! bothInfs && ((rawA.sign && ! common_ltMags && ! common_eqMags) || (! rawB.sign && common_ltMags)))) val ordered_eq = bothZeros || ((rawA.sign === rawB.sign) && (bothInfs || common_eqMags)) val invalid = isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) || (io.signaling && ! ordered) io.lt := ordered && ordered_lt io.eq := ordered && ordered_eq io.gt := ordered && ! ordered_lt && ! ordered_eq io.exceptionFlags := invalid ## 0.U(4.W) } File rawFloatFromRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. 
Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ /*---------------------------------------------------------------------------- | In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be | set. *----------------------------------------------------------------------------*/ object rawFloatFromRecFN { def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat = { val exp = in(expWidth + sigWidth - 1, sigWidth - 1) val isZero = exp(expWidth, expWidth - 2) === 0.U val isSpecial = exp(expWidth, expWidth - 1) === 3.U val out = Wire(new RawFloat(expWidth, sigWidth)) out.isNaN := isSpecial && exp(expWidth - 2) out.isInf := isSpecial && ! exp(expWidth - 2) out.isZero := isZero out.sign := in(expWidth + sigWidth) out.sExp := exp.zext out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0) out } } File common.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. 
Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ object consts { /*------------------------------------------------------------------------ | For rounding to integer values, rounding mode 'odd' rounds to minimum | magnitude instead, same as 'minMag'. *------------------------------------------------------------------------*/ def round_near_even = "b000".U(3.W) def round_minMag = "b001".U(3.W) def round_min = "b010".U(3.W) def round_max = "b011".U(3.W) def round_near_maxMag = "b100".U(3.W) def round_odd = "b110".U(3.W) /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ def tininess_beforeRounding = 0.U def tininess_afterRounding = 1.U /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ def flRoundOpt_sigMSBitAlwaysZero = 1 def flRoundOpt_subnormsAlwaysExact = 2 def flRoundOpt_neverUnderflows = 4 def flRoundOpt_neverOverflows = 8 /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ def divSqrtOpt_twoBitsPerCycle = 16 } class RawFloat(val expWidth: Int, val sigWidth: Int) extends Bundle { val isNaN: Bool = Bool() // overrides all other fields val isInf: Bool = Bool() // overrides 'isZero', 'sExp', and 'sig' val isZero: Bool = Bool() // overrides 'sExp' and 'sig' val sign: Bool = Bool() val sExp: SInt = SInt((expWidth + 2).W) val sig: UInt = UInt((sigWidth + 1).W) // 2 m.s. bits cannot both be 0 } //*** CHANGE THIS INTO A '.isSigNaN' METHOD OF THE 'RawFloat' CLASS: object isSigNaNRawFloat { def apply(in: RawFloat): Bool = in.isNaN && !in.sig(in.sigWidth - 2) }
module CompareRecFN_14( // @[CompareRecFN.scala:42:7] input [32:0] io_b, // @[CompareRecFN.scala:44:16] output io_gt // @[CompareRecFN.scala:44:16] ); wire [32:0] io_b_0 = io_b; // @[CompareRecFN.scala:42:7] wire [8:0] rawA_exp = 9'h2B; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _rawA_isZero_T = 3'h0; // @[rawFloatFromRecFN.scala:52:28] wire [9:0] rawA_sExp = 10'h2B; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire [9:0] _rawA_out_sExp_T = 10'h2B; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire [1:0] _rawA_isSpecial_T = 2'h0; // @[rawFloatFromRecFN.scala:53:28, :61:32] wire [1:0] _rawA_out_sig_T_1 = 2'h0; // @[rawFloatFromRecFN.scala:53:28, :61:32] wire [22:0] _rawA_out_sig_T_2 = 23'h0; // @[rawFloatFromRecFN.scala:61:49] wire [24:0] rawA_sig = 25'h0; // @[rawFloatFromRecFN.scala:55:23, :61:44] wire [24:0] _rawA_out_sig_T_3 = 25'h0; // @[rawFloatFromRecFN.scala:55:23, :61:44] wire rawA_isZero = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36] wire rawA_isZero_0 = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36] wire _rawA_out_isInf_T_1 = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36] wire _ordered_T = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36] wire _ordered_lt_T_3 = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36] wire _invalid_T_1 = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36] wire io_signaling = 1'h0; // @[CompareRecFN.scala:42:7] wire rawA_isSpecial = 1'h0; // @[rawFloatFromRecFN.scala:53:53] wire rawA_isNaN = 1'h0; // @[rawFloatFromRecFN.scala:55:23] wire rawA_isInf = 1'h0; // @[rawFloatFromRecFN.scala:55:23] wire rawA_sign = 1'h0; // @[rawFloatFromRecFN.scala:55:23] wire _rawA_out_isNaN_T = 1'h0; // @[rawFloatFromRecFN.scala:56:41] wire _rawA_out_isNaN_T_1 = 1'h0; // @[rawFloatFromRecFN.scala:56:33] wire _rawA_out_isInf_T = 1'h0; // @[rawFloatFromRecFN.scala:57:41] wire _rawA_out_isInf_T_2 = 1'h0; // @[rawFloatFromRecFN.scala:57:33] wire _rawA_out_sign_T = 1'h0; // @[rawFloatFromRecFN.scala:59:25] wire _rawA_out_sig_T = 1'h0; // @[rawFloatFromRecFN.scala:61:35] wire bothInfs = 1'h0; // @[CompareRecFN.scala:58:33] wire _ordered_lt_T_2 = 1'h0; // @[CompareRecFN.scala:67:25] wire _ordered_lt_T_5 = 1'h0; // @[CompareRecFN.scala:69:35] wire _ordered_lt_T_7 = 1'h0; // @[CompareRecFN.scala:69:54] wire _invalid_T = 1'h0; // @[common.scala:82:56] wire _invalid_T_2 = 1'h0; // @[common.scala:82:46] wire _invalid_T_8 = 1'h0; // @[CompareRecFN.scala:76:27] wire [32:0] io_a = 33'h15800000; // @[CompareRecFN.scala:42:7, :44:16] wire _io_lt_T; // @[CompareRecFN.scala:78:22] wire _io_eq_T; // @[CompareRecFN.scala:79:22] wire _io_gt_T_3; // @[CompareRecFN.scala:80:38] wire [4:0] _io_exceptionFlags_T; // @[CompareRecFN.scala:81:34] wire io_lt; // @[CompareRecFN.scala:42:7] wire io_eq; // @[CompareRecFN.scala:42:7] wire io_gt_0; // @[CompareRecFN.scala:42:7] wire [4:0] io_exceptionFlags; // @[CompareRecFN.scala:42:7] wire [8:0] rawB_exp = io_b_0[31:23]; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _rawB_isZero_T = rawB_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28] wire rawB_isZero = _rawB_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}] wire rawB_isZero_0 = rawB_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23] wire [1:0] _rawB_isSpecial_T = rawB_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28] wire rawB_isSpecial = &_rawB_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}] wire _rawB_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33] wire _rawB_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33] wire 
_rawB_out_sign_T; // @[rawFloatFromRecFN.scala:59:25] wire bothZeros = rawB_isZero_0; // @[rawFloatFromRecFN.scala:55:23] wire [9:0] _rawB_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27] wire [24:0] _rawB_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44] wire rawB_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire rawB_isInf; // @[rawFloatFromRecFN.scala:55:23] wire rawB_sign; // @[rawFloatFromRecFN.scala:55:23] wire [9:0] rawB_sExp; // @[rawFloatFromRecFN.scala:55:23] wire [24:0] rawB_sig; // @[rawFloatFromRecFN.scala:55:23] wire _rawB_out_isNaN_T = rawB_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41] wire _rawB_out_isInf_T = rawB_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41] assign _rawB_out_isNaN_T_1 = rawB_isSpecial & _rawB_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}] assign rawB_isNaN = _rawB_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33] wire _rawB_out_isInf_T_1 = ~_rawB_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}] assign _rawB_out_isInf_T_2 = rawB_isSpecial & _rawB_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}] assign rawB_isInf = _rawB_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33] assign _rawB_out_sign_T = io_b_0[32]; // @[rawFloatFromRecFN.scala:59:25] assign rawB_sign = _rawB_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25] assign _rawB_out_sExp_T = {1'h0, rawB_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27] assign rawB_sExp = _rawB_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _rawB_out_sig_T = ~rawB_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35] wire [1:0] _rawB_out_sig_T_1 = {1'h0, _rawB_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}] wire [22:0] _rawB_out_sig_T_2 = io_b_0[22:0]; // @[rawFloatFromRecFN.scala:61:49] assign _rawB_out_sig_T_3 = {_rawB_out_sig_T_1, _rawB_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}] assign rawB_sig = _rawB_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44] wire _ordered_T_1 = ~rawB_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire ordered = _ordered_T_1; // @[CompareRecFN.scala:57:{32,35}] wire eqExps = rawB_sExp == 10'h2B; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _common_ltMags_T = $signed(rawB_sExp) > 10'sh2B; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _common_ltMags_T_1 = |rawB_sig; // @[rawFloatFromRecFN.scala:55:23] wire _common_ltMags_T_2 = eqExps & _common_ltMags_T_1; // @[CompareRecFN.scala:60:29, :62:{44,57}] wire common_ltMags = _common_ltMags_T | _common_ltMags_T_2; // @[CompareRecFN.scala:62:{20,33,44}] wire _common_eqMags_T = rawB_sig == 25'h0; // @[rawFloatFromRecFN.scala:55:23, :61:44] wire common_eqMags = eqExps & _common_eqMags_T; // @[CompareRecFN.scala:60:29, :63:{32,45}] wire _ordered_eq_T_1 = common_eqMags; // @[CompareRecFN.scala:63:32, :72:62] wire _ordered_lt_T = ~bothZeros; // @[CompareRecFN.scala:59:33, :66:9] wire _ordered_lt_T_1 = ~rawB_sign; // @[rawFloatFromRecFN.scala:55:23] wire _ordered_lt_T_4 = ~common_ltMags; // @[CompareRecFN.scala:62:33, :69:38] wire _ordered_lt_T_6 = ~common_eqMags; // @[CompareRecFN.scala:63:32, :69:57] wire _ordered_lt_T_8 = ~rawB_sign; // @[rawFloatFromRecFN.scala:55:23] wire _ordered_lt_T_9 = _ordered_lt_T_8 & common_ltMags; // @[CompareRecFN.scala:62:33, :70:{29,41}] wire _ordered_lt_T_10 = _ordered_lt_T_9; // @[CompareRecFN.scala:69:74, :70:41] wire _ordered_lt_T_11 = _ordered_lt_T_10; // @[CompareRecFN.scala:68:30, :69:74] wire _ordered_lt_T_12 = _ordered_lt_T_11; // @[CompareRecFN.scala:67:41, :68:30] wire ordered_lt = _ordered_lt_T & 
_ordered_lt_T_12; // @[CompareRecFN.scala:66:{9,21}, :67:41] wire _ordered_eq_T = ~rawB_sign; // @[rawFloatFromRecFN.scala:55:23] wire _ordered_eq_T_2 = _ordered_eq_T & _ordered_eq_T_1; // @[CompareRecFN.scala:72:{34,49,62}] wire ordered_eq = bothZeros | _ordered_eq_T_2; // @[CompareRecFN.scala:59:33, :72:{19,49}] wire _invalid_T_3 = rawB_sig[22]; // @[rawFloatFromRecFN.scala:55:23] wire _invalid_T_4 = ~_invalid_T_3; // @[common.scala:82:{49,56}] wire _invalid_T_5 = rawB_isNaN & _invalid_T_4; // @[rawFloatFromRecFN.scala:55:23] wire _invalid_T_6 = _invalid_T_5; // @[common.scala:82:46] wire invalid = _invalid_T_6; // @[CompareRecFN.scala:75:{32,58}] wire _invalid_T_7 = ~ordered; // @[CompareRecFN.scala:57:32, :76:30] assign _io_lt_T = ordered & ordered_lt; // @[CompareRecFN.scala:57:32, :66:21, :78:22] assign io_lt = _io_lt_T; // @[CompareRecFN.scala:42:7, :78:22] assign _io_eq_T = ordered & ordered_eq; // @[CompareRecFN.scala:57:32, :72:19, :79:22] assign io_eq = _io_eq_T; // @[CompareRecFN.scala:42:7, :79:22] wire _io_gt_T = ~ordered_lt; // @[CompareRecFN.scala:66:21, :80:25] wire _io_gt_T_1 = ordered & _io_gt_T; // @[CompareRecFN.scala:57:32, :80:{22,25}] wire _io_gt_T_2 = ~ordered_eq; // @[CompareRecFN.scala:72:19, :80:41] assign _io_gt_T_3 = _io_gt_T_1 & _io_gt_T_2; // @[CompareRecFN.scala:80:{22,38,41}] assign io_gt_0 = _io_gt_T_3; // @[CompareRecFN.scala:42:7, :80:38] assign _io_exceptionFlags_T = {invalid, 4'h0}; // @[CompareRecFN.scala:75:58, :81:34] assign io_exceptionFlags = _io_exceptionFlags_T; // @[CompareRecFN.scala:42:7, :81:34] assign io_gt = io_gt_0; // @[CompareRecFN.scala:42:7] endmodule
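A hedged usage sketch for the CompareRecFN comparator above, instantiated for single precision (expWidth = 8, sigWidth = 24, so the recoded operands are 33 bits wide); the wrapper name CompareExample is illustrative, and in a real datapath the recoded operands would typically come from recFNFromFN:

import chisel3._
import hardfloat._

class CompareExample extends Module {
  val io = IO(new Bundle {
    val a     = Input(UInt(33.W)) // recoded single-precision operand
    val b     = Input(UInt(33.W))
    val lt    = Output(Bool())
    val eq    = Output(Bool())
    val gt    = Output(Bool())
    val flags = Output(UInt(5.W))
  })
  val cmp = Module(new CompareRecFN(8, 24))
  cmp.io.a := io.a
  cmp.io.b := io.b
  cmp.io.signaling := true.B // raise invalid on any NaN operand (signaling compare)
  io.lt    := cmp.io.lt
  io.eq    := cmp.io.eq
  io.gt    := cmp.io.gt
  io.flags := cmp.io.exceptionFlags
}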
Generate the Verilog code corresponding to the following Chisel files. File DescribedSRAM.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3.{Data, SyncReadMem, Vec} import chisel3.util.log2Ceil object DescribedSRAM { def apply[T <: Data]( name: String, desc: String, size: BigInt, // depth data: T ): SyncReadMem[T] = { val mem = SyncReadMem(size, data) mem.suggestName(name) val granWidth = data match { case v: Vec[_] => v.head.getWidth case d => d.getWidth } val uid = 0 Annotated.srams( component = mem, name = name, address_width = log2Ceil(size), data_width = data.getWidth, depth = size, description = desc, write_mask_granularity = granWidth ) mem } }
module dataArrayB0Way_3( // @[DescribedSRAM.scala:17:26]
  input  [7:0]  RW0_addr,
  input         RW0_en,
  input         RW0_clk,
  input         RW0_wmode,
  input  [63:0] RW0_wdata,
  output [63:0] RW0_rdata
);

  dataArrayB0Way_0_ext dataArrayB0Way_0_ext ( // @[DescribedSRAM.scala:17:26]
    .RW0_addr  (RW0_addr),
    .RW0_en    (RW0_en),
    .RW0_clk   (RW0_clk),
    .RW0_wmode (RW0_wmode),
    .RW0_wdata (RW0_wdata),
    .RW0_rdata (RW0_rdata)
  ); // @[DescribedSRAM.scala:17:26]
endmodule
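A hedged usage sketch for the DescribedSRAM helper above, sized to match the 8-bit address and 64-bit data ports of dataArrayB0Way_3; the wrapper module, its port names, and the name/desc strings are illustrative:

import chisel3._
import freechips.rocketchip.util.DescribedSRAM

class DataArraySketch extends Module {
  val io = IO(new Bundle {
    val addr  = Input(UInt(8.W))
    val en    = Input(Bool())
    val write = Input(Bool())
    val wdata = Input(UInt(64.W))
    val rdata = Output(UInt(64.W))
  })
  val mem = DescribedSRAM(
    name = "dataArrayB0Way",
    desc = "data array way (sketch)",
    size = 256,            // 2^8 entries, matching the 8-bit RW0_addr
    data = UInt(64.W))
  when (io.en && io.write) { mem.write(io.addr, io.wdata) }
  io.rdata := mem.read(io.addr, io.en && !io.write)
}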
Generate the Verilog code corresponding to the following Chisel files. File RoundAnyRawFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util.Fill import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundAnyRawFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int, options: Int ) extends RawModule { override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(inExpWidth, inSigWidth)) // (allowed exponent range has limits) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0) val effectiveInSigWidth = if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1 val neverUnderflows = ((options & (flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact) ) != 0) || (inExpWidth < outExpWidth) val neverOverflows = ((options & flRoundOpt_neverOverflows) != 0) || (inExpWidth < outExpWidth) val outNaNExp = BigInt(7)<<(outExpWidth - 2) val outInfExp = BigInt(6)<<(outExpWidth - 2) val outMaxFiniteExp = outInfExp - 1 val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2 val outMinNonzeroExp = outMinNormExp - outSigWidth + 1 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundingMode_near_even = (io.roundingMode === round_near_even) val roundingMode_minMag = (io.roundingMode === round_minMag) val roundingMode_min = (io.roundingMode === round_min) val roundingMode_max = (io.roundingMode === round_max) val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag) val roundingMode_odd = (io.roundingMode === round_odd) val roundMagUp = (roundingMode_min && io.in.sign) || (roundingMode_max && ! 
io.in.sign) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sAdjustedExp = if (inExpWidth < outExpWidth) (io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S )(outExpWidth, 0).zext else if (inExpWidth == outExpWidth) io.in.sExp else io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S val adjustedSig = if (inSigWidth <= outSigWidth + 2) io.in.sig<<(outSigWidth - inSigWidth + 2) else (io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ## io.in.sig(inSigWidth - outSigWidth - 2, 0).orR ) val doShiftSigDown1 = if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2) val common_expOut = Wire(UInt((outExpWidth + 1).W)) val common_fractOut = Wire(UInt((outSigWidth - 1).W)) val common_overflow = Wire(Bool()) val common_totalUnderflow = Wire(Bool()) val common_underflow = Wire(Bool()) val common_inexact = Wire(Bool()) if ( neverOverflows && neverUnderflows && (effectiveInSigWidth <= outSigWidth) ) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1 common_fractOut := Mux(doShiftSigDown1, adjustedSig(outSigWidth + 1, 3), adjustedSig(outSigWidth, 2) ) common_overflow := false.B common_totalUnderflow := false.B common_underflow := false.B common_inexact := false.B } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundMask = if (neverUnderflows) 0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W) else (lowMask( sAdjustedExp(outExpWidth, 0), outMinNormExp - outSigWidth - 1, outMinNormExp ) | doShiftSigDown1) ## 3.U(2.W) val shiftedRoundMask = 0.U(1.W) ## roundMask>>1 val roundPosMask = ~shiftedRoundMask & roundMask val roundPosBit = (adjustedSig & roundPosMask).orR val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR val anyRound = roundPosBit || anyRoundExtra val roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && roundPosBit) || (roundMagUp && anyRound) val roundedSig: Bits = Mux(roundIncr, (((adjustedSig | roundMask)>>2) +& 1.U) & ~Mux(roundingMode_near_even && roundPosBit && ! anyRoundExtra, roundMask>>1, 0.U((outSigWidth + 2).W) ), (adjustedSig & ~roundMask)>>2 | Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U) ) //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? 
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext common_expOut := sRoundedExp(outExpWidth, 0) common_fractOut := Mux(doShiftSigDown1, roundedSig(outSigWidth - 1, 1), roundedSig(outSigWidth - 2, 0) ) common_overflow := (if (neverOverflows) false.B else //*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?: (sRoundedExp>>(outExpWidth - 1) >= 3.S)) common_totalUnderflow := (if (neverUnderflows) false.B else //*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?: (sRoundedExp < outMinNonzeroExp.S)) val unboundedRange_roundPosBit = Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1)) val unboundedRange_anyRound = (doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR val unboundedRange_roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && unboundedRange_roundPosBit) || (roundMagUp && unboundedRange_anyRound) val roundCarry = Mux(doShiftSigDown1, roundedSig(outSigWidth + 1), roundedSig(outSigWidth) ) common_underflow := (if (neverUnderflows) false.B else common_totalUnderflow || //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? (anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) && Mux(doShiftSigDown1, roundMask(3), roundMask(2)) && ! ((io.detectTininess === tininess_afterRounding) && ! Mux(doShiftSigDown1, roundMask(4), roundMask(3) ) && roundCarry && roundPosBit && unboundedRange_roundIncr))) common_inexact := common_totalUnderflow || anyRound } //------------------------------------------------------------------------ //------------------------------------------------------------------------ val isNaNOut = io.invalidExc || io.in.isNaN val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero val overflow = commonCase && common_overflow val underflow = commonCase && common_underflow val inexact = overflow || (commonCase && common_inexact) val overflow_roundMagUp = roundingMode_near_even || roundingMode_near_maxMag || roundMagUp val pegMinNonzeroMagOut = commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd) val pegMaxFiniteMagOut = overflow && ! 
overflow_roundMagUp val notNaN_isInfOut = notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp) val signOut = Mux(isNaNOut, false.B, io.in.sign) val expOut = (common_expOut & ~Mux(io.in.isZero || common_totalUnderflow, (BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMinNonzeroMagOut, ~outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMaxFiniteMagOut, (BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W), 0.U ) & ~Mux(notNaN_isInfOut, (BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U )) | Mux(pegMinNonzeroMagOut, outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) | Mux(pegMaxFiniteMagOut, outMaxFiniteExp.U((outExpWidth + 1).W), 0.U ) | Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) | Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U) val fractOut = Mux(isNaNOut || io.in.isZero || common_totalUnderflow, Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U), common_fractOut ) | Fill(outSigWidth - 1, pegMaxFiniteMagOut) io.out := signOut ## expOut ## fractOut io.exceptionFlags := io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int) extends RawModule { override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(expWidth, sigWidth + 2)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( expWidth, sigWidth + 2, expWidth, sigWidth, options)) roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc roundAnyRawFNToRecFN.io.in := io.in roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags }
module RoundRawFNToRecFN_e8_s24_103( // @[RoundAnyRawFNToRecFN.scala:295:5] input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:299:16] input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:299:16] input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:299:16] input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:299:16] input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:299:16] input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:299:16] input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:299:16] output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:299:16] output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:299:16] ); wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:295:5] wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:295:5] wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15] wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15] wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15] wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5] wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5] RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_103 roundAnyRawFNToRecFN ( // @[RoundAnyRawFNToRecFN.scala:310:15] .io_invalidExc (io_invalidExc_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_isNaN (io_in_isNaN_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_isInf (io_in_isInf_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_isZero (io_in_isZero_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_sign (io_in_sign_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_sExp (io_in_sExp_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_sig (io_in_sig_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_out (io_out_0), .io_exceptionFlags (io_exceptionFlags_0) ); // @[RoundAnyRawFNToRecFN.scala:310:15] assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5] assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5] endmodule
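For reference, a minimal usage sketch (an assumption added here, not part of the prompt/response pair above) of the RoundRawFNToRecFN wrapper defined in the quoted Chisel source, specialized the same way as the generated module above (expWidth = 8, sigWidth = 24, roundingMode = 0, detectTininess = 1). The rounding-mode and tininess encodings used below are the conventional hardfloat ones, and the example assumes the surrounding hardfloat sources (RawFloat, RoundRawFNToRecFN) are in scope.

import chisel3._

// Hypothetical wrapper module for illustration only.
class RoundRawFNToRecFNExample extends Module {
  val io = IO(new Bundle {
    val in    = Input(new RawFloat(8, 24 + 2))   // raw operand; the wrapper expects sigWidth + 2 significand bits
    val out   = Output(Bits((8 + 24 + 1).W))     // 33-bit recoded single-precision result
    val flags = Output(Bits(5.W))                // {invalid, infinite, overflow, underflow, inexact}
  })
  val rounder = Module(new RoundRawFNToRecFN(expWidth = 8, sigWidth = 24, options = 0))
  rounder.io.invalidExc     := false.B
  rounder.io.infiniteExc    := false.B
  rounder.io.in             := io.in
  rounder.io.roundingMode   := 0.U(3.W)          // round-to-nearest-even (assumed hardfloat encoding)
  rounder.io.detectTininess := 1.U(1.W)          // detect tininess after rounding (assumed encoding)
  io.out   := rounder.io.out
  io.flags := rounder.io.exceptionFlags
}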
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: package constellation.channel import chisel3._ import chisel3.util._ import freechips.rocketchip.diplomacy._ import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.util._ import constellation.noc.{HasNoCParams} class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams { val io = IO(new Bundle { val in = Input(new Channel(cParam)) }) val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B })) for (i <- 0 until cParam.srcSpeedup) { val flit = io.in.flit(i) when (flit.valid) { when (flit.bits.head) { in_flight(flit.bits.virt_channel_id) := true.B assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken") } when (flit.bits.tail) { in_flight(flit.bits.virt_channel_id) := false.B } } val possibleFlows = cParam.possibleFlows when (flit.valid && flit.bits.head) { cParam match { case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) => assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR) } case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR) } } } } File Types.scala: package constellation.routing import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Parameters} import constellation.noc.{HasNoCParams} import constellation.channel.{Flit} /** A representation for 1 specific virtual channel in wormhole routing * * @param src the source node * @param vc ID for the virtual channel * @param dst the destination node * @param n_vc the number of virtual channels */ // BEGIN: ChannelRoutingInfo case class ChannelRoutingInfo( src: Int, dst: Int, vc: Int, n_vc: Int ) { // END: ChannelRoutingInfo require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this") require (!(src == -1 && dst == -1), s"Illegal $this") require (vc < n_vc, s"Illegal $this") val isIngress = src == -1 val isEgress = dst == -1 } /** Represents the properties of a packet that are relevant for routing * ingressId and egressId uniquely identify a flow, but vnet and dst are used here * to simplify the implementation of routingrelations * * @param ingressId packet's source ingress point * @param egressId packet's destination egress point * @param vNet virtual subnetwork identifier * @param dst packet's destination node ID */ // BEGIN: FlowRoutingInfo case class FlowRoutingInfo( ingressId: Int, egressId: Int, vNetId: Int, ingressNode: Int, ingressNodeId: Int, egressNode: Int, egressNodeId: Int, fifo: Boolean ) { // END: FlowRoutingInfo def isFlow(f: FlowRoutingBundle): Bool = { (f.ingress_node === ingressNode.U && f.egress_node === egressNode.U && f.ingress_node_id === ingressNodeId.U && f.egress_node_id === egressNodeId.U) } def asLiteral(b: FlowRoutingBundle): BigInt = { Seq( (vNetId , b.vnet_id), (ingressNode , b.ingress_node), (ingressNodeId , b.ingress_node_id), (egressNode , b.egress_node), (egressNodeId , b.egress_node_id) ).foldLeft(0)((l, t) => { (l << t._2.getWidth) | t._1 }) } } class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams { // Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination // This simplifies the routing tables val vnet_id = UInt(log2Ceil(nVirtualNetworks).W) val ingress_node = UInt(log2Ceil(nNodes).W) val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W) val egress_node = UInt(log2Ceil(nNodes).W) val 
egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W) }
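As a small illustration (hypothetical values, not part of the files above), the ChannelRoutingInfo case class from Types.scala can be exercised in plain Scala: the require clauses reject vc >= n_vc and the case where both src and dst are -1, and isIngress/isEgress are derived from the -1 sentinels.

import constellation.routing.ChannelRoutingInfo

object ChannelRoutingInfoExample extends App {
  // An ingress channel: src == -1 marks traffic entering the network at node 3.
  val ingress  = ChannelRoutingInfo(src = -1, dst = 3, vc = 0, n_vc = 4)
  // An interior channel between nodes 2 and 3 on virtual channel 1 of 4.
  val interior = ChannelRoutingInfo(src = 2, dst = 3, vc = 1, n_vc = 4)
  println(ingress.isIngress)   // true
  println(interior.isEgress)   // false
  // ChannelRoutingInfo(src = 0, dst = 1, vc = 4, n_vc = 4) would fail the vc < n_vc require.
}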
module NoCMonitor_10( // @[Monitor.scala:11:7] input clock, // @[Monitor.scala:11:7] input reset, // @[Monitor.scala:11:7] input io_in_flit_0_valid, // @[Monitor.scala:12:14] input io_in_flit_0_bits_head, // @[Monitor.scala:12:14] input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14] input [5:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14] input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14] input [5:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14] input [2:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14] input [4:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14] ); reg in_flight_0; // @[Monitor.scala:16:26] reg in_flight_1; // @[Monitor.scala:16:26] reg in_flight_2; // @[Monitor.scala:16:26] reg in_flight_3; // @[Monitor.scala:16:26] reg in_flight_4; // @[Monitor.scala:16:26] reg in_flight_5; // @[Monitor.scala:16:26] reg in_flight_6; // @[Monitor.scala:16:26] reg in_flight_7; // @[Monitor.scala:16:26] reg in_flight_8; // @[Monitor.scala:16:26] reg in_flight_9; // @[Monitor.scala:16:26] reg in_flight_10; // @[Monitor.scala:16:26] reg in_flight_11; // @[Monitor.scala:16:26] reg in_flight_12; // @[Monitor.scala:16:26] reg in_flight_13; // @[Monitor.scala:16:26] reg in_flight_14; // @[Monitor.scala:16:26] reg in_flight_15; // @[Monitor.scala:16:26] reg in_flight_16; // @[Monitor.scala:16:26] reg in_flight_17; // @[Monitor.scala:16:26] reg in_flight_18; // @[Monitor.scala:16:26] reg in_flight_19; // @[Monitor.scala:16:26] reg in_flight_20; // @[Monitor.scala:16:26] reg in_flight_21; // @[Monitor.scala:16:26] wire _GEN = io_in_flit_0_bits_virt_channel_id == 5'h0; // @[Monitor.scala:21:46] wire _GEN_0 = io_in_flit_0_bits_virt_channel_id == 5'h1; // @[Monitor.scala:21:46] wire _GEN_1 = io_in_flit_0_bits_virt_channel_id == 5'h2; // @[Monitor.scala:21:46] wire _GEN_2 = io_in_flit_0_bits_virt_channel_id == 5'h3; // @[Monitor.scala:21:46] wire _GEN_3 = io_in_flit_0_bits_virt_channel_id == 5'h4; // @[Monitor.scala:21:46] wire _GEN_4 = io_in_flit_0_bits_virt_channel_id == 5'h5; // @[Monitor.scala:21:46] wire _GEN_5 = io_in_flit_0_bits_virt_channel_id == 5'h6; // @[Monitor.scala:21:46] wire _GEN_6 = io_in_flit_0_bits_virt_channel_id == 5'h7; // @[Monitor.scala:21:46] wire _GEN_7 = io_in_flit_0_bits_virt_channel_id == 5'h8; // @[Monitor.scala:21:46] wire _GEN_8 = io_in_flit_0_bits_virt_channel_id == 5'h9; // @[Monitor.scala:21:46] wire _GEN_9 = io_in_flit_0_bits_virt_channel_id == 5'hA; // @[Monitor.scala:21:46] wire _GEN_10 = io_in_flit_0_bits_virt_channel_id == 5'hB; // @[Monitor.scala:21:46] wire _GEN_11 = io_in_flit_0_bits_virt_channel_id == 5'hE; // @[Monitor.scala:21:46] wire _GEN_12 = io_in_flit_0_bits_virt_channel_id == 5'hF; // @[Monitor.scala:21:46] wire _GEN_13 = io_in_flit_0_bits_virt_channel_id == 5'h12; // @[Monitor.scala:21:46] wire _GEN_14 = io_in_flit_0_bits_virt_channel_id == 5'h13; // @[Monitor.scala:21:46]
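The head/tail bookkeeping performed by NoCMonitor above can be summarized with a self-contained sketch (hypothetical module name, not part of constellation): each virtual channel has an in-flight bit that a head flit sets and a tail flit clears, and a second head observed while the bit is still set trips the same "Flit head/tail sequencing is broken" assertion.

import chisel3._
import chisel3.util._

class FlitSequenceChecker(nVirtualChannels: Int) extends Module {
  require(nVirtualChannels > 1)
  val io = IO(new Bundle {
    val valid           = Input(Bool())
    val head            = Input(Bool())
    val tail            = Input(Bool())
    val virt_channel_id = Input(UInt(log2Ceil(nVirtualChannels).W))
  })
  // One in-flight bit per virtual channel, cleared at reset.
  val in_flight = RegInit(VecInit(Seq.fill(nVirtualChannels)(false.B)))
  when (io.valid) {
    when (io.head) {
      assert(!in_flight(io.virt_channel_id), "Flit head/tail sequencing is broken")
      in_flight(io.virt_channel_id) := true.B
    }
    when (io.tail) {
      in_flight(io.virt_channel_id) := false.B
    }
  }
}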
Generate the Verilog code corresponding to the following Chisel files. File WidthWidget.scala: package constellation.channel import chisel3._ import chisel3.util._ import freechips.rocketchip.diplomacy._ import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.util._ object WidthWidget { def split[T <: BaseFlit](in: IrrevocableIO[T], out: IrrevocableIO[T]) = { val inBits = in.bits.payload.getWidth val outBits = out.bits.payload.getWidth require(inBits > outBits && inBits % outBits == 0) val ratio = inBits / outBits val count = RegInit(0.U(log2Ceil(ratio).W)) val first = count === 0.U val last = count === (ratio - 1).U val stored = Reg(UInt((inBits - outBits).W)) out.valid := in.valid out.bits := in.bits out.bits.head := in.bits.head && first out.bits.tail := in.bits.tail && last out.bits.payload := Mux(first, in.bits.payload, stored) in.ready := last && out.ready when (out.fire) { count := Mux(last, 0.U, count + 1.U) stored := Mux(first, in.bits.payload, stored) >> outBits } } def merge[T <: BaseFlit](in: IrrevocableIO[T], out: IrrevocableIO[T]) = { val inBits = in.bits.payload.getWidth val outBits = out.bits.payload.getWidth require(outBits > inBits && outBits % inBits == 0) val ratio = outBits / inBits val count = RegInit(0.U(log2Ceil(ratio).W)) val first = count === 0.U val last = count === (ratio - 1).U val flit = Reg(out.bits.cloneType) val payload = Reg(Vec(ratio-1, UInt(inBits.W))) out.valid := in.valid && last out.bits := flit out.bits.tail := last && in.bits.tail out.bits.payload := Cat(in.bits.payload, payload.asUInt) in.ready := !last || out.ready when (in.fire) { count := Mux(last, 0.U, count + 1.U) payload(count) := in.bits.payload when (first) { flit := in.bits } } } def apply[T <: BaseFlit](in: IrrevocableIO[T], out: IrrevocableIO[T]) = { val inBits = in.bits.payload.getWidth val outBits = out.bits.payload.getWidth if (inBits == outBits) { out <> in } else if (inBits < outBits) { merge(in, out) } else { split(in, out) } } } class IngressWidthWidget(srcBits: Int)(implicit p: Parameters) extends LazyModule { val node = new IngressChannelAdapterNode( slaveFn = { s => s.copy(payloadBits=srcBits) } ) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => WidthWidget(in.flit, out.flit) } } } object IngressWidthWidget { def apply(destBits: Int, srcBits: Int)(implicit p: Parameters) = { if (destBits == srcBits) { val node = IngressChannelEphemeralNode() node } else { val ingress_width_widget = LazyModule(new IngressWidthWidget(srcBits)) ingress_width_widget.node } } } class EgressWidthWidget(srcBits: Int)(implicit p: Parameters) extends LazyModule { val node = new EgressChannelAdapterNode( slaveFn = { s => s.copy(payloadBits=srcBits) } ) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => WidthWidget(in.flit, out.flit) } } } object EgressWidthWidget { def apply(destBits: Int, srcBits: Int)(implicit p: Parameters) = { if (destBits == srcBits) { val node = EgressChannelEphemeralNode() node } else { val egress_width_widget = LazyModule(new EgressWidthWidget(srcBits)) egress_width_widget.node } } } class ChannelWidthWidget(srcBits: Int)(implicit p: Parameters) extends LazyModule { val node = new ChannelAdapterNode( slaveFn = { s => val destBits = s.payloadBits if (srcBits > destBits) { val ratio = srcBits / destBits val virtualChannelParams = s.virtualChannelParams.map { vP => require(vP.bufferSize >= ratio) vP.copy(bufferSize = vP.bufferSize / 
ratio) } s.copy(payloadBits=srcBits, virtualChannelParams=virtualChannelParams) } else { val ratio = destBits / srcBits val virtualChannelParams = s.virtualChannelParams.map { vP => vP.copy(bufferSize = vP.bufferSize * ratio) } s.copy(payloadBits=srcBits, virtualChannelParams=virtualChannelParams) } } ) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => val destBits = out.cParam.payloadBits in.vc_free := out.vc_free if (srcBits == destBits) { in.credit_return := out.credit_return out.flit <> in.flit } else if (srcBits > destBits) { require(srcBits % destBits == 0, s"$srcBits, $destBits") (in.flit zip out.flit) foreach { case (iF, oF) => val in_q = Module(new Queue(new Flit(in.cParam.payloadBits), in.cParam.virtualChannelParams.map(_.bufferSize).sum, pipe=true, flow=true)) in_q.io.enq.valid := iF.valid in_q.io.enq.bits := iF.bits assert(!(in_q.io.enq.valid && !in_q.io.enq.ready)) val in_flit = Wire(Irrevocable(new Flit(in.cParam.payloadBits))) in_flit.valid := in_q.io.deq.valid in_flit.bits := in_q.io.deq.bits in_q.io.deq.ready := in_flit.ready val out_flit = Wire(Irrevocable(new Flit(out.cParam.payloadBits))) oF.valid := out_flit.valid oF.bits := out_flit.bits out_flit.ready := true.B WidthWidget(in_flit, out_flit) } val ratio = srcBits / destBits val counts = RegInit(VecInit(Seq.fill(in.nVirtualChannels) { 0.U(log2Ceil(ratio).W) })) val in_credit_return = Wire(Vec(in.nVirtualChannels, Bool())) in.credit_return := in_credit_return.asUInt for (i <- 0 until in.nVirtualChannels) { in_credit_return(i) := false.B when (out.credit_return(i)) { val last = counts(i) === (ratio-1).U counts(i) := Mux(last, 0.U, counts(i) + 1.U) when (last) { in_credit_return(i) := true.B } } } } else { require(destBits % srcBits == 0) val in_flits = Wire(Vec(in.nVirtualChannels, Irrevocable(new Flit(in.cParam.payloadBits)))) val out_flits = Wire(Vec(in.nVirtualChannels, Irrevocable(new Flit(out.cParam.payloadBits)))) for (i <- 0 until in.nVirtualChannels) { val sel = in.flit.map(f => f.valid && f.bits.virt_channel_id === i.U) in_flits(i).valid := sel.orR in_flits(i).bits := Mux1H(sel, in.flit.map(_.bits)) out_flits(i).ready := sel.orR WidthWidget(in_flits(i), out_flits(i)) } for (i <- 0 until in.flit.size) { val sel = UIntToOH(in.flit(i).bits.virt_channel_id) out.flit(i).valid := Mux1H(sel, out_flits.map(_.valid)) out.flit(i).bits := Mux1H(sel, out_flits.map(_.bits)) } val ratio = destBits / srcBits val credits = RegInit(VecInit((0 until in.nVirtualChannels).map { i => 0.U(log2Ceil(ratio*in.cParam.virtualChannelParams(i).bufferSize).W) })) val in_credit_return = Wire(Vec(in.nVirtualChannels, Bool())) in.credit_return := in_credit_return.asUInt for (i <- 0 until in.nVirtualChannels) { when (out.credit_return(i)) { in_credit_return(i) := true.B credits(i) := credits(i) + (ratio - 1).U } .otherwise { val empty = credits(i) === 0.U in_credit_return(i) := !empty credits(i) := Mux(empty, 0.U, credits(i) - 1.U) } } } } } } object ChannelWidthWidget { def apply(destBits: Int, srcBits: Int)(implicit p: Parameters) = { if (destBits == srcBits) { val node = ChannelEphemeralNode() node } else { val channel_width_widget = LazyModule(new ChannelWidthWidget(srcBits)) channel_width_widget.node } } } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import 
org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. 
val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } }
module IngressWidthWidget_2( // @[WidthWidget.scala:75:25] input clock, // @[WidthWidget.scala:75:25] input reset, // @[WidthWidget.scala:75:25] output auto_in_flit_ready, // @[LazyModuleImp.scala:107:25] input auto_in_flit_valid, // @[LazyModuleImp.scala:107:25] input auto_in_flit_bits_head, // @[LazyModuleImp.scala:107:25] input auto_in_flit_bits_tail, // @[LazyModuleImp.scala:107:25] input [147:0] auto_in_flit_bits_payload, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25] input auto_out_flit_ready, // @[LazyModuleImp.scala:107:25] output auto_out_flit_valid, // @[LazyModuleImp.scala:107:25] output auto_out_flit_bits_head, // @[LazyModuleImp.scala:107:25] output auto_out_flit_bits_tail, // @[LazyModuleImp.scala:107:25] output [36:0] auto_out_flit_bits_payload, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_flit_bits_egress_id // @[LazyModuleImp.scala:107:25] ); reg [1:0] count; // @[WidthWidget.scala:15:24] wire first = count == 2'h0; // @[WidthWidget.scala:15:24, :16:23, :75:25] reg [110:0] stored; // @[WidthWidget.scala:18:21] wire [147:0] _stored_T = first ? auto_in_flit_bits_payload : {37'h0, stored}; // @[WidthWidget.scala:16:23, :18:21, :24:28] wire _GEN = auto_out_flit_ready & auto_in_flit_valid; // @[Decoupled.scala:51:35] always @(posedge clock) begin // @[WidthWidget.scala:75:25] if (reset) // @[WidthWidget.scala:75:25] count <= 2'h0; // @[WidthWidget.scala:15:24, :75:25] else if (_GEN) // @[Decoupled.scala:51:35] count <= (&count) ? 2'h0 : count + 2'h1; // @[WidthWidget.scala:15:24, :17:22, :27:{19,37}, :75:25] if (_GEN) // @[Decoupled.scala:51:35] stored <= _stored_T[147:37]; // @[WidthWidget.scala:18:21, :24:28, :28:53] end // @[WidthWidget.scala:75:25] assign auto_in_flit_ready = (&count) & auto_out_flit_ready; assign auto_out_flit_valid = auto_in_flit_valid; assign auto_out_flit_bits_head = auto_in_flit_bits_head & first; assign auto_out_flit_bits_tail = auto_in_flit_bits_tail & (&count); assign auto_out_flit_bits_payload = _stored_T[36:0]; assign auto_out_flit_bits_egress_id = auto_in_flit_bits_egress_id; endmodule
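The narrowing path exercised by IngressWidthWidget_2 above (148-bit ingress payload serialized onto a 37-bit link, ratio 4) comes from WidthWidget.split. Below is a standalone sketch of that splitter, using a hypothetical SimpleFlit bundle instead of the constellation flit types, assuming the same payload-shifting scheme.

import chisel3._
import chisel3.util._

// Hypothetical stand-in for the constellation flit bundles.
class SimpleFlit(val payloadBits: Int) extends Bundle {
  val head    = Bool()
  val tail    = Bool()
  val payload = UInt(payloadBits.W)
}

class FlitSplitter(inBits: Int, outBits: Int) extends Module {
  require(inBits > outBits && inBits % outBits == 0)
  val ratio = inBits / outBits
  val io = IO(new Bundle {
    val in  = Flipped(Irrevocable(new SimpleFlit(inBits)))
    val out = Irrevocable(new SimpleFlit(outBits))
  })
  val count  = RegInit(0.U(log2Ceil(ratio).W))
  val first  = count === 0.U
  val last   = count === (ratio - 1).U
  val stored = Reg(UInt((inBits - outBits).W))       // holds the not-yet-sent upper payload bits
  io.out.valid        := io.in.valid
  io.out.bits.head    := io.in.bits.head && first    // head only on the first beat
  io.out.bits.tail    := io.in.bits.tail && last     // tail only on the last beat
  io.out.bits.payload := Mux(first, io.in.bits.payload, stored)  // only the low outBits bits land on the narrow port
  io.in.ready         := last && io.out.ready        // consume the wide flit on the last beat
  when (io.out.fire) {
    count  := Mux(last, 0.U, count + 1.U)
    stored := Mux(first, io.in.bits.payload, stored) >> outBits
  }
}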
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_299( // @[SynchronizerReg.scala:68:19] input clock, // @[SynchronizerReg.scala:68:19] input reset, // @[SynchronizerReg.scala:68:19] output io_q // @[ShiftReg.scala:36:14] ); wire io_d = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19] wire _sync_2_T = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19] wire io_q_0; // @[SynchronizerReg.scala:68:19] reg sync_0; // @[SynchronizerReg.scala:51:87] assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19] reg sync_1; // @[SynchronizerReg.scala:51:87] reg sync_2; // @[SynchronizerReg.scala:51:87] always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19] if (reset) begin // @[SynchronizerReg.scala:68:19] sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87] sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87] sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87] end else begin // @[SynchronizerReg.scala:68:19] sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87] sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87] sync_2 <= 1'h1; // @[SynchronizerReg.scala:51:87, :54:22, :68:19] end end // @[SynchronizerReg.scala:68:19] assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19] endmodule
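The leaf module above is what AsyncResetSynchronizerShiftReg elaborates into for a depth-3, init-0 chain. A small usage sketch (hypothetical wrapper module; the apply signature is the one in the SynchronizerReg.scala source quoted in the prompt):

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class LevelSynchronizer extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())   // level signal arriving from another clock domain
    val sync_out = Output(Bool())
  })
  // Three flops deep, reset value 0, with a suggested name for the generated synchronizer chain.
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, sync = 3, init = 0, name = Some("cdc_sync"))
}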
Generate the Verilog code corresponding to the following Chisel files. File Repeater.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{Decoupled, DecoupledIO} // A Repeater passes its input to its output, unless repeat is asserted. // When repeat is asserted, the Repeater copies the input and repeats it next cycle. class Repeater[T <: Data](gen: T) extends Module { override def desiredName = s"Repeater_${gen.typeName}" val io = IO( new Bundle { val repeat = Input(Bool()) val full = Output(Bool()) val enq = Flipped(Decoupled(gen.cloneType)) val deq = Decoupled(gen.cloneType) } ) val full = RegInit(false.B) val saved = Reg(gen.cloneType) // When !full, a repeater is pass-through io.deq.valid := io.enq.valid || full io.enq.ready := io.deq.ready && !full io.deq.bits := Mux(full, saved, io.enq.bits) io.full := full when (io.enq.fire && io.repeat) { full := true.B; saved := io.enq.bits } when (io.deq.fire && !io.repeat) { full := false.B } } object Repeater { def apply[T <: Data](enq: DecoupledIO[T], repeat: Bool): DecoupledIO[T] = { val repeater = Module(new Repeater(chiselTypeOf(enq.bits))) repeater.io.repeat := repeat repeater.io.enq <> enq repeater.io.deq } }
module Repeater_TLBundleA_a21d64s9k1z3u_1( // @[Repeater.scala:10:7] input clock, // @[Repeater.scala:10:7] input reset, // @[Repeater.scala:10:7] input io_repeat, // @[Repeater.scala:13:14] output io_full, // @[Repeater.scala:13:14] output io_enq_ready, // @[Repeater.scala:13:14] input io_enq_valid, // @[Repeater.scala:13:14] input [2:0] io_enq_bits_opcode, // @[Repeater.scala:13:14] input [2:0] io_enq_bits_param, // @[Repeater.scala:13:14] input [2:0] io_enq_bits_size, // @[Repeater.scala:13:14] input [8:0] io_enq_bits_source, // @[Repeater.scala:13:14] input [20:0] io_enq_bits_address, // @[Repeater.scala:13:14] input [7:0] io_enq_bits_mask, // @[Repeater.scala:13:14] input [63:0] io_enq_bits_data, // @[Repeater.scala:13:14] input io_enq_bits_corrupt, // @[Repeater.scala:13:14] input io_deq_ready, // @[Repeater.scala:13:14] output io_deq_valid, // @[Repeater.scala:13:14] output [2:0] io_deq_bits_opcode, // @[Repeater.scala:13:14] output [2:0] io_deq_bits_param, // @[Repeater.scala:13:14] output [2:0] io_deq_bits_size, // @[Repeater.scala:13:14] output [8:0] io_deq_bits_source, // @[Repeater.scala:13:14] output [20:0] io_deq_bits_address, // @[Repeater.scala:13:14] output [7:0] io_deq_bits_mask, // @[Repeater.scala:13:14] output io_deq_bits_corrupt // @[Repeater.scala:13:14] ); wire io_repeat_0 = io_repeat; // @[Repeater.scala:10:7] wire io_enq_valid_0 = io_enq_valid; // @[Repeater.scala:10:7] wire [2:0] io_enq_bits_opcode_0 = io_enq_bits_opcode; // @[Repeater.scala:10:7] wire [2:0] io_enq_bits_param_0 = io_enq_bits_param; // @[Repeater.scala:10:7] wire [2:0] io_enq_bits_size_0 = io_enq_bits_size; // @[Repeater.scala:10:7] wire [8:0] io_enq_bits_source_0 = io_enq_bits_source; // @[Repeater.scala:10:7] wire [20:0] io_enq_bits_address_0 = io_enq_bits_address; // @[Repeater.scala:10:7] wire [7:0] io_enq_bits_mask_0 = io_enq_bits_mask; // @[Repeater.scala:10:7] wire [63:0] io_enq_bits_data_0 = io_enq_bits_data; // @[Repeater.scala:10:7] wire io_enq_bits_corrupt_0 = io_enq_bits_corrupt; // @[Repeater.scala:10:7] wire io_deq_ready_0 = io_deq_ready; // @[Repeater.scala:10:7] wire _io_enq_ready_T_1; // @[Repeater.scala:25:32] wire _io_deq_valid_T; // @[Repeater.scala:24:32] wire [2:0] _io_deq_bits_T_opcode; // @[Repeater.scala:26:21] wire [2:0] _io_deq_bits_T_param; // @[Repeater.scala:26:21] wire [2:0] _io_deq_bits_T_size; // @[Repeater.scala:26:21] wire [8:0] _io_deq_bits_T_source; // @[Repeater.scala:26:21] wire [20:0] _io_deq_bits_T_address; // @[Repeater.scala:26:21] wire [7:0] _io_deq_bits_T_mask; // @[Repeater.scala:26:21] wire [63:0] _io_deq_bits_T_data; // @[Repeater.scala:26:21] wire _io_deq_bits_T_corrupt; // @[Repeater.scala:26:21] wire io_enq_ready_0; // @[Repeater.scala:10:7] wire [2:0] io_deq_bits_opcode_0; // @[Repeater.scala:10:7] wire [2:0] io_deq_bits_param_0; // @[Repeater.scala:10:7] wire [2:0] io_deq_bits_size_0; // @[Repeater.scala:10:7] wire [8:0] io_deq_bits_source_0; // @[Repeater.scala:10:7] wire [20:0] io_deq_bits_address_0; // @[Repeater.scala:10:7] wire [7:0] io_deq_bits_mask_0; // @[Repeater.scala:10:7] wire [63:0] io_deq_bits_data; // @[Repeater.scala:10:7] wire io_deq_bits_corrupt_0; // @[Repeater.scala:10:7] wire io_deq_valid_0; // @[Repeater.scala:10:7] wire io_full_0; // @[Repeater.scala:10:7] reg full; // @[Repeater.scala:20:21] assign io_full_0 = full; // @[Repeater.scala:10:7, :20:21] reg [2:0] saved_opcode; // @[Repeater.scala:21:18] reg [2:0] saved_param; // @[Repeater.scala:21:18] reg [2:0] saved_size; // @[Repeater.scala:21:18] reg [8:0] saved_source; 
// @[Repeater.scala:21:18] reg [20:0] saved_address; // @[Repeater.scala:21:18] reg [7:0] saved_mask; // @[Repeater.scala:21:18] reg [63:0] saved_data; // @[Repeater.scala:21:18] reg saved_corrupt; // @[Repeater.scala:21:18] assign _io_deq_valid_T = io_enq_valid_0 | full; // @[Repeater.scala:10:7, :20:21, :24:32] assign io_deq_valid_0 = _io_deq_valid_T; // @[Repeater.scala:10:7, :24:32] wire _io_enq_ready_T = ~full; // @[Repeater.scala:20:21, :25:35] assign _io_enq_ready_T_1 = io_deq_ready_0 & _io_enq_ready_T; // @[Repeater.scala:10:7, :25:{32,35}] assign io_enq_ready_0 = _io_enq_ready_T_1; // @[Repeater.scala:10:7, :25:32] assign _io_deq_bits_T_opcode = full ? saved_opcode : io_enq_bits_opcode_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21] assign _io_deq_bits_T_param = full ? saved_param : io_enq_bits_param_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21] assign _io_deq_bits_T_size = full ? saved_size : io_enq_bits_size_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21] assign _io_deq_bits_T_source = full ? saved_source : io_enq_bits_source_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21] assign _io_deq_bits_T_address = full ? saved_address : io_enq_bits_address_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21] assign _io_deq_bits_T_mask = full ? saved_mask : io_enq_bits_mask_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21] assign _io_deq_bits_T_data = full ? saved_data : io_enq_bits_data_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21] assign _io_deq_bits_T_corrupt = full ? saved_corrupt : io_enq_bits_corrupt_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21] assign io_deq_bits_opcode_0 = _io_deq_bits_T_opcode; // @[Repeater.scala:10:7, :26:21] assign io_deq_bits_param_0 = _io_deq_bits_T_param; // @[Repeater.scala:10:7, :26:21] assign io_deq_bits_size_0 = _io_deq_bits_T_size; // @[Repeater.scala:10:7, :26:21] assign io_deq_bits_source_0 = _io_deq_bits_T_source; // @[Repeater.scala:10:7, :26:21] assign io_deq_bits_address_0 = _io_deq_bits_T_address; // @[Repeater.scala:10:7, :26:21] assign io_deq_bits_mask_0 = _io_deq_bits_T_mask; // @[Repeater.scala:10:7, :26:21] assign io_deq_bits_data = _io_deq_bits_T_data; // @[Repeater.scala:10:7, :26:21] assign io_deq_bits_corrupt_0 = _io_deq_bits_T_corrupt; // @[Repeater.scala:10:7, :26:21] wire _T_1 = io_enq_ready_0 & io_enq_valid_0 & io_repeat_0; // @[Decoupled.scala:51:35] always @(posedge clock) begin // @[Repeater.scala:10:7] if (reset) // @[Repeater.scala:10:7] full <= 1'h0; // @[Repeater.scala:20:21] else // @[Repeater.scala:10:7] full <= ~(io_deq_ready_0 & io_deq_valid_0 & ~io_repeat_0) & (_T_1 | full); // @[Decoupled.scala:51:35] if (_T_1) begin // @[Decoupled.scala:51:35] saved_opcode <= io_enq_bits_opcode_0; // @[Repeater.scala:10:7, :21:18] saved_param <= io_enq_bits_param_0; // @[Repeater.scala:10:7, :21:18] saved_size <= io_enq_bits_size_0; // @[Repeater.scala:10:7, :21:18] saved_source <= io_enq_bits_source_0; // @[Repeater.scala:10:7, :21:18] saved_address <= io_enq_bits_address_0; // @[Repeater.scala:10:7, :21:18] saved_mask <= io_enq_bits_mask_0; // @[Repeater.scala:10:7, :21:18] saved_data <= io_enq_bits_data_0; // @[Repeater.scala:10:7, :21:18] saved_corrupt <= io_enq_bits_corrupt_0; // @[Repeater.scala:10:7, :21:18] end end // @[Repeater.scala:10:7] assign io_full = io_full_0; // @[Repeater.scala:10:7] assign io_enq_ready = io_enq_ready_0; // @[Repeater.scala:10:7] assign io_deq_valid = io_deq_valid_0; // @[Repeater.scala:10:7] assign io_deq_bits_opcode = io_deq_bits_opcode_0; // @[Repeater.scala:10:7] assign
io_deq_bits_param = io_deq_bits_param_0; // @[Repeater.scala:10:7] assign io_deq_bits_size = io_deq_bits_size_0; // @[Repeater.scala:10:7] assign io_deq_bits_source = io_deq_bits_source_0; // @[Repeater.scala:10:7] assign io_deq_bits_address = io_deq_bits_address_0; // @[Repeater.scala:10:7] assign io_deq_bits_mask = io_deq_bits_mask_0; // @[Repeater.scala:10:7] assign io_deq_bits_corrupt = io_deq_bits_corrupt_0; // @[Repeater.scala:10:7] endmodule
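A minimal usage sketch (hypothetical wrapper, not part of rocket-chip) of the Repeater companion object from the prompt above: a beat accepted while repeat is asserted is copied and presented again on the next cycle, and the saved copy keeps being replayed until a beat is delivered with repeat deasserted.

import chisel3._
import chisel3.util._
import freechips.rocketchip.util.Repeater

class RepeaterExample extends Module {
  val io = IO(new Bundle {
    val enq    = Flipped(Decoupled(UInt(8.W)))
    val repeat = Input(Bool())
    val deq    = Decoupled(UInt(8.W))
  })
  // Repeater(enq, repeat) returns a DecoupledIO that replays the saved beat while the repeater is full.
  io.deq <> Repeater(io.enq, io.repeat)
}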
Generate the Verilog code corresponding to the following Chisel files. File TilelinkAdapters.scala: package constellation.protocol import chisel3._ import chisel3.util._ import constellation.channel._ import constellation.noc._ import constellation.soc.{CanAttachToGlobalNoC} import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.util._ import freechips.rocketchip.tilelink._ import scala.collection.immutable.{ListMap} abstract class TLChannelToNoC[T <: TLChannel](gen: => T, edge: TLEdge, idToEgress: Int => Int)(implicit val p: Parameters) extends Module with TLFieldHelper { val flitWidth = minTLPayloadWidth(gen) val io = IO(new Bundle { val protocol = Flipped(Decoupled(gen)) val flit = Decoupled(new IngressFlit(flitWidth)) }) def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B // convert decoupled to irrevocable val q = Module(new Queue(gen, 1, pipe=true, flow=true)) val protocol = q.io.deq val has_body = Wire(Bool()) val body_fields = getBodyFields(protocol.bits) val const_fields = getConstFields(protocol.bits) val head = edge.first(protocol.bits, protocol.fire) val tail = edge.last(protocol.bits, protocol.fire) def requestOH: Seq[Bool] val body = Cat( body_fields.filter(_.getWidth > 0).map(_.asUInt)) val const = Cat(const_fields.filter(_.getWidth > 0).map(_.asUInt)) val is_body = RegInit(false.B) io.flit.valid := protocol.valid protocol.ready := io.flit.ready && (is_body || !has_body) io.flit.bits.head := head && !is_body io.flit.bits.tail := tail && (is_body || !has_body) io.flit.bits.egress_id := Mux1H(requestOH.zipWithIndex.map { case (r, i) => r -> idToEgress(i).U }) io.flit.bits.payload := Mux(is_body, body, const) when (io.flit.fire && io.flit.bits.head) { is_body := true.B } when (io.flit.fire && io.flit.bits.tail) { is_body := false.B } } abstract class TLChannelFromNoC[T <: TLChannel](gen: => T)(implicit val p: Parameters) extends Module with TLFieldHelper { val flitWidth = minTLPayloadWidth(gen) val io = IO(new Bundle { val protocol = Decoupled(gen) val flit = Flipped(Decoupled(new EgressFlit(flitWidth))) }) // Handle size = 1 gracefully (Chisel3 empty range is broken) def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0) val protocol = Wire(Decoupled(gen)) val body_fields = getBodyFields(protocol.bits) val const_fields = getConstFields(protocol.bits) val is_const = RegInit(true.B) val const_reg = Reg(UInt(const_fields.map(_.getWidth).sum.W)) val const = Mux(io.flit.bits.head, io.flit.bits.payload, const_reg) io.flit.ready := (is_const && !io.flit.bits.tail) || protocol.ready protocol.valid := (!is_const || io.flit.bits.tail) && io.flit.valid def assign(i: UInt, sigs: Seq[Data]) = { var t = i for (s <- sigs.reverse) { s := t.asTypeOf(s.cloneType) t = t >> s.getWidth } } assign(const, const_fields) assign(io.flit.bits.payload, body_fields) when (io.flit.fire && io.flit.bits.head) { is_const := false.B; const_reg := io.flit.bits.payload } when (io.flit.fire && io.flit.bits.tail) { is_const := true.B } } trait HasAddressDecoder { // Filter a list to only those elements selected def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1) val edgeIn: TLEdge val edgesOut: Seq[TLEdge] lazy val reacheableIO = edgesOut.map { mp => edgeIn.client.clients.exists { c => mp.manager.managers.exists { m => c.visibility.exists { ca => m.address.exists { ma => ca.overlaps(ma) }} }} }.toVector lazy val releaseIO = (edgesOut zip reacheableIO).map { case (mp, reachable) => 
reachable && edgeIn.client.anySupportProbe && mp.manager.anySupportAcquireB }.toVector def outputPortFn(connectIO: Seq[Boolean]) = { val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address)) val routingMask = AddressDecoder(filter(port_addrs, connectIO)) val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct)) route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_||_)) } } class TLAToNoC( val edgeIn: TLEdge, val edgesOut: Seq[TLEdge], bundle: TLBundleParameters, slaveToAEgress: Int => Int, sourceStart: Int )(implicit p: Parameters) extends TLChannelToNoC(new TLBundleA(bundle), edgeIn, slaveToAEgress)(p) with HasAddressDecoder { has_body := edgeIn.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U) lazy val connectAIO = reacheableIO lazy val requestOH = outputPortFn(connectAIO).zipWithIndex.map { case (o, j) => connectAIO(j).B && (unique(connectAIO) || o(protocol.bits.address)) } q.io.enq <> io.protocol q.io.enq.bits.source := io.protocol.bits.source | sourceStart.U } class TLAFromNoC(edgeOut: TLEdge, bundle: TLBundleParameters)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleA(bundle))(p) { io.protocol <> protocol when (io.flit.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) } } class TLBToNoC( edgeOut: TLEdge, edgesIn: Seq[TLEdge], bundle: TLBundleParameters, masterToBIngress: Int => Int )(implicit p: Parameters) extends TLChannelToNoC(new TLBundleB(bundle), edgeOut, masterToBIngress)(p) { has_body := edgeOut.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U) lazy val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client)) lazy val requestOH = inputIdRanges.map { i => i.contains(protocol.bits.source) } q.io.enq <> io.protocol } class TLBFromNoC(edgeIn: TLEdge, bundle: TLBundleParameters, sourceSize: Int)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleB(bundle))(p) { io.protocol <> protocol io.protocol.bits.source := trim(protocol.bits.source, sourceSize) when (io.flit.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) } } class TLCToNoC( val edgeIn: TLEdge, val edgesOut: Seq[TLEdge], bundle: TLBundleParameters, slaveToCEgress: Int => Int, sourceStart: Int )(implicit p: Parameters) extends TLChannelToNoC(new TLBundleC(bundle), edgeIn, slaveToCEgress)(p) with HasAddressDecoder { has_body := edgeIn.hasData(protocol.bits) lazy val connectCIO = releaseIO lazy val requestOH = outputPortFn(connectCIO).zipWithIndex.map { case (o, j) => connectCIO(j).B && (unique(connectCIO) || o(protocol.bits.address)) } q.io.enq <> io.protocol q.io.enq.bits.source := io.protocol.bits.source | sourceStart.U } class TLCFromNoC(edgeOut: TLEdge, bundle: TLBundleParameters)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleC(bundle))(p) { io.protocol <> protocol } class TLDToNoC( edgeOut: TLEdge, edgesIn: Seq[TLEdge], bundle: TLBundleParameters, masterToDIngress: Int => Int, sourceStart: Int )(implicit p: Parameters) extends TLChannelToNoC(new TLBundleD(bundle), edgeOut, masterToDIngress)(p) { has_body := edgeOut.hasData(protocol.bits) lazy val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client)) lazy val requestOH = inputIdRanges.map { i => i.contains(protocol.bits.source) } q.io.enq <> io.protocol q.io.enq.bits.sink := io.protocol.bits.sink | sourceStart.U } class TLDFromNoC(edgeIn: TLEdge, bundle: TLBundleParameters, sourceSize: Int)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleD(bundle))(p) { io.protocol <> protocol 
io.protocol.bits.source := trim(protocol.bits.source, sourceSize) } class TLEToNoC( val edgeIn: TLEdge, val edgesOut: Seq[TLEdge], bundle: TLBundleParameters, slaveToEEgress: Int => Int )(implicit p: Parameters) extends TLChannelToNoC(new TLBundleE(bundle), edgeIn, slaveToEEgress)(p) { has_body := edgeIn.hasData(protocol.bits) lazy val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager)) lazy val requestOH = outputIdRanges.map { o => o.contains(protocol.bits.sink) } q.io.enq <> io.protocol } class TLEFromNoC(edgeOut: TLEdge, bundle: TLBundleParameters, sourceSize: Int)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleE(bundle))(p) { io.protocol <> protocol io.protocol.bits.sink := trim(protocol.bits.sink, sourceSize) } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? 
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x 
match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! 
def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare 
c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, 
mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := 
DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: 
Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, 
data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
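For orientation, a minimal sketch of how the TLEdgeOut message constructors and the first/last helpers in Edges.scala are typically driven from a client module. The node name, address, and sizes below are invented for illustration and are not taken from the files above.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.tilelink._

class GetOnce(implicit p: Parameters) extends LazyModule {
  // One client with the default single source ID
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(Seq(TLMasterParameters.v1(name = "getter")))))
  lazy val module = new LazyModuleImp(this) {
    val (out, edge) = node.out(0)
    // edge.Get returns (legal, bits); legal mirrors manager.supportsGetFast
    val (legal, getBits) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 3.U)
    out.a.valid := legal
    out.a.bits  := getBits
    out.d.ready := true.B
    // Convenience wrapper around firstlastHelper: true on the beat that completes the response
    val d_done = edge.done(out.d)
    // Tie off the TL-C channels this simple client does not use
    out.b.ready := true.B
    out.c.valid := false.B
    out.c.bits  := DontCare
    out.e.valid := false.B
    out.e.bits  := DontCare
  }
}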
module TLDToNoC( // @[TilelinkAdapters.scala:171:7] input clock, // @[TilelinkAdapters.scala:171:7] input reset, // @[TilelinkAdapters.scala:171:7] output io_protocol_ready, // @[TilelinkAdapters.scala:19:14] input io_protocol_valid, // @[TilelinkAdapters.scala:19:14] input [2:0] io_protocol_bits_opcode, // @[TilelinkAdapters.scala:19:14] input [1:0] io_protocol_bits_param, // @[TilelinkAdapters.scala:19:14] input [3:0] io_protocol_bits_size, // @[TilelinkAdapters.scala:19:14] input [6:0] io_protocol_bits_source, // @[TilelinkAdapters.scala:19:14] input [5:0] io_protocol_bits_sink, // @[TilelinkAdapters.scala:19:14] input io_protocol_bits_denied, // @[TilelinkAdapters.scala:19:14] input [127:0] io_protocol_bits_data, // @[TilelinkAdapters.scala:19:14] input io_protocol_bits_corrupt, // @[TilelinkAdapters.scala:19:14] input io_flit_ready, // @[TilelinkAdapters.scala:19:14] output io_flit_valid, // @[TilelinkAdapters.scala:19:14] output io_flit_bits_head, // @[TilelinkAdapters.scala:19:14] output io_flit_bits_tail, // @[TilelinkAdapters.scala:19:14] output [128:0] io_flit_bits_payload, // @[TilelinkAdapters.scala:19:14] output [3:0] io_flit_bits_egress_id // @[TilelinkAdapters.scala:19:14] ); wire _q_io_deq_valid; // @[TilelinkAdapters.scala:26:17] wire [2:0] _q_io_deq_bits_opcode; // @[TilelinkAdapters.scala:26:17] wire [1:0] _q_io_deq_bits_param; // @[TilelinkAdapters.scala:26:17] wire [3:0] _q_io_deq_bits_size; // @[TilelinkAdapters.scala:26:17] wire [6:0] _q_io_deq_bits_source; // @[TilelinkAdapters.scala:26:17] wire [5:0] _q_io_deq_bits_sink; // @[TilelinkAdapters.scala:26:17] wire _q_io_deq_bits_denied; // @[TilelinkAdapters.scala:26:17] wire [127:0] _q_io_deq_bits_data; // @[TilelinkAdapters.scala:26:17] wire _q_io_deq_bits_corrupt; // @[TilelinkAdapters.scala:26:17] wire [26:0] _tail_beats1_decode_T = 27'hFFF << _q_io_deq_bits_size; // @[package.scala:243:71] reg [7:0] head_counter; // @[Edges.scala:229:27] wire head = head_counter == 8'h0; // @[Edges.scala:229:27, :231:25] wire [7:0] tail_beats1 = _q_io_deq_bits_opcode[0] ? ~(_tail_beats1_decode_T[11:4]) : 8'h0; // @[package.scala:243:{46,71,76}] reg [7:0] tail_counter; // @[Edges.scala:229:27] reg is_body; // @[TilelinkAdapters.scala:39:24] wire q_io_deq_ready = io_flit_ready & (is_body | ~(_q_io_deq_bits_opcode[0])); // @[Edges.scala:106:36] wire io_flit_bits_head_0 = head & ~is_body; // @[Edges.scala:231:25] wire io_flit_bits_tail_0 = (tail_counter == 8'h1 | tail_beats1 == 8'h0) & (is_body | ~(_q_io_deq_bits_opcode[0])); // @[Edges.scala:106:36, :221:14, :229:27, :232:{25,33,43}] wire _GEN = io_flit_ready & _q_io_deq_valid; // @[Decoupled.scala:51:35] always @(posedge clock) begin // @[TilelinkAdapters.scala:171:7] if (reset) begin // @[TilelinkAdapters.scala:171:7] head_counter <= 8'h0; // @[Edges.scala:229:27] tail_counter <= 8'h0; // @[Edges.scala:229:27] is_body <= 1'h0; // @[TilelinkAdapters.scala:39:24, :171:7] end else begin // @[TilelinkAdapters.scala:171:7] if (q_io_deq_ready & _q_io_deq_valid) begin // @[Decoupled.scala:51:35] head_counter <= head ? (_q_io_deq_bits_opcode[0] ? ~(_tail_beats1_decode_T[11:4]) : 8'h0) : head_counter - 8'h1; // @[package.scala:243:{46,71,76}] tail_counter <= tail_counter == 8'h0 ? tail_beats1 : tail_counter - 8'h1; // @[Edges.scala:221:14, :229:27, :230:28, :231:25, :236:21] end is_body <= ~(_GEN & io_flit_bits_tail_0) & (_GEN & io_flit_bits_head_0 | is_body); // @[Decoupled.scala:51:35] end always @(posedge)
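The head and tail counters in the always block above are the hardware form of the firstlastHelper arithmetic from Edges.scala; the 4-bit size field and the [11:4] slice of the decode suggest maxLgSize = 12 and beatBytes = 16 for this instance, which is what produces 8-bit counters. A standalone sketch of the same bookkeeping, with invented parameter and port names:

import chisel3._
import chisel3.util._

// Tracks first/last beats the way firstlastHelper does, assuming transfers of at
// most (1 << maxLgSize) bytes over a beatBytes-wide data channel.
class BeatTracker(maxLgSize: Int, beatBytes: Int) extends Module {
  val io = IO(new Bundle {
    val size    = Input(UInt(log2Ceil(maxLgSize + 1).W)) // lgSize of the message
    val hasData = Input(Bool())                          // data-less messages take one beat
    val fire    = Input(Bool())
    val first   = Output(Bool())
    val last    = Output(Bool())
  })
  // beats1 = (number of beats) - 1, i.e. UIntToOH1(size, maxLgSize) >> log2Ceil(beatBytes)
  val beats1  = Mux(io.hasData, (UIntToOH(io.size, maxLgSize + 1) - 1.U) >> log2Ceil(beatBytes), 0.U)
  val counter = RegInit(0.U(log2Up((1 << maxLgSize) / beatBytes).W))
  io.first := counter === 0.U
  io.last  := counter === 1.U || beats1 === 0.U
  when (io.fire) { counter := Mux(io.first, beats1, counter - 1.U) }
}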
Generate the Verilog code corresponding to the following Chisel files. File Nodes.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends 
FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): 
TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( 
dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File BootROM.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.devices.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.bundlebridge._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.{AddressSet, RegionType, TransferSizes} import freechips.rocketchip.resources.{Resource, SimpleDevice} import freechips.rocketchip.subsystem._ import freechips.rocketchip.tilelink.{TLFragmenter, TLManagerNode, TLSlaveParameters, TLSlavePortParameters} import java.nio.ByteBuffer import java.nio.file.{Files, Paths} /** Size, location and contents of the boot rom. 
*/ case class BootROMParams( address: BigInt = 0x10000, size: Int = 0x10000, hang: BigInt = 0x10040, // The hang parameter is used as the power-on reset vector contentFileName: String) class TLROM(val base: BigInt, val size: Int, contentsDelayed: => Seq[Byte], executable: Boolean = true, beatBytes: Int = 4, resources: Seq[Resource] = new SimpleDevice("rom", Seq("sifive,rom0")).reg("mem"))(implicit p: Parameters) extends LazyModule { val node = TLManagerNode(Seq(TLSlavePortParameters.v1( Seq(TLSlaveParameters.v1( address = List(AddressSet(base, size-1)), resources = resources, regionType = RegionType.UNCACHED, executable = executable, supportsGet = TransferSizes(1, beatBytes), fifoId = Some(0))), beatBytes = beatBytes))) lazy val module = new Impl class Impl extends LazyModuleImp(this) { val contents = contentsDelayed val wrapSize = 1 << log2Ceil(contents.size) require (wrapSize <= size) val (in, edge) = node.in(0) val words = (contents ++ Seq.fill(wrapSize-contents.size)(0.toByte)).grouped(beatBytes).toSeq val bigs = words.map(_.foldRight(BigInt(0)){ case (x,y) => (x.toInt & 0xff) | y << 8}) val rom = VecInit(bigs.map(_.U((8*beatBytes).W))) in.d.valid := in.a.valid in.a.ready := in.d.ready val index = in.a.bits.address(log2Ceil(wrapSize)-1,log2Ceil(beatBytes)) val high = if (wrapSize == size) 0.U else in.a.bits.address(log2Ceil(size)-1, log2Ceil(wrapSize)) in.d.bits := edge.AccessAck(in.a.bits, Mux(high.orR, 0.U, rom(index))) // Tie off unused channels in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B } } case class BootROMLocated(loc: HierarchicalLocation) extends Field[Option[BootROMParams]](None) object BootROM { /** BootROM.attach not only instantiates a TLROM and attaches it to the tilelink interconnect * at a configurable location, but also drives the tiles' reset vectors to point * at its 'hang' address parameter value. */ def attach(params: BootROMParams, subsystem: BaseSubsystem with HasHierarchicalElements with HasTileInputConstants, where: TLBusWrapperLocation) (implicit p: Parameters): TLROM = { val tlbus = subsystem.locateTLBusWrapper(where) val bootROMDomainWrapper = tlbus.generateSynchronousDomain("BootROM").suggestName("bootrom_domain") val bootROMResetVectorSourceNode = BundleBridgeSource[UInt]() lazy val contents = { val romdata = Files.readAllBytes(Paths.get(params.contentFileName)) val rom = ByteBuffer.wrap(romdata) rom.array() ++ subsystem.dtb.contents } val bootrom = bootROMDomainWrapper { LazyModule(new TLROM(params.address, params.size, contents, true, tlbus.beatBytes)) } bootrom.node := tlbus.coupleTo("bootrom"){ TLFragmenter(tlbus, Some("BootROM")) := _ } // Drive the `subsystem` reset vector to the `hang` address of this Boot ROM. subsystem.tileResetVectorNexusNode := bootROMResetVectorSourceNode InModuleBody { val reset_vector_source = bootROMResetVectorSourceNode.bundle require(reset_vector_source.getWidth >= params.hang.bitLength, s"BootROM defined with a reset vector (${params.hang})too large for physical address space (${reset_vector_source.getWidth})") bootROMResetVectorSourceNode.bundle := params.hang.U } bootrom } }
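One detail of TLROM worth making concrete: the words/bigs lines pack every beatBytes-sized group of ROM bytes little-endian into a single beat word, so each word literal in the generated ROM holds consecutive image bytes with the earliest byte in the least-significant position. A tiny self-contained Scala check of that fold, with invented byte values:

object RomPackExample extends App {
  val beatBytes = 4
  val contents: Seq[Byte] = Seq(0x78, 0x56, 0x34, 0x12).map(_.toByte)
  val words = contents.grouped(beatBytes).toSeq
  // Same fold as in TLROM: earlier bytes land in less-significant byte positions
  val bigs = words.map(_.foldRight(BigInt(0)) { case (x, y) => (x.toInt & 0xff) | y << 8 })
  assert(bigs.head == BigInt(0x12345678))
  println("0x" + bigs.head.toString(16)) // prints 0x12345678
}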
module TLROM( // @[BootROM.scala:41:9] input clock, // @[BootROM.scala:41:9] input reset, // @[BootROM.scala:41:9] output auto_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [1:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [11:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [16:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_in_d_valid, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [11:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output [63:0] auto_in_d_bits_data // @[LazyModuleImp.scala:107:25] ); wire [511:0][63:0] _GEN = '{64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h737470, 64'h75727265746E6900, 64'h746E657261702D74, 64'h7075727265746E69, 64'h736B636F6C6300, 64'h7665646E2C766373, 64'h697200797469726F, 64'h6972702D78616D2C, 64'h7663736972006863, 64'h617474612D677562, 64'h6564006465646E65, 64'h7478652D73747075, 64'h727265746E690073, 64'h656D616E2D747570, 64'h74756F2D6B636F6C, 64'h6300736C6C65632D, 64'h6B636F6C63230074, 64'h6E756F632D726873, 64'h6D2C657669666973, 64'h64656966696E75, 64'h2D6568636163006C, 64'h6576656C2D656863, 64'h61630073656D616E, 64'h2D67657200736567, 64'h6E617200656C646E, 64'h6168700072656C6C, 64'h6F72746E6F632D74, 64'h7075727265746E69, 64'h736C6C65632D74, 64'h7075727265746E69, 64'h230074696C70732D, 64'h626C740073757461, 64'h747300736E6F6967, 64'h6572706D702C7663, 64'h7369720079746972, 64'h616C756E61726770, 64'h6D702C7663736972, 64'h6173692C766373, 64'h6972006765720065, 64'h686361632D6C6576, 64'h656C2D7478656E00, 64'h657079742D756D6D, 64'h657A69732D626C, 64'h742D690073746573, 64'h2D626C742D690065, 64'h7A69732D65686361, 64'h632D690073746573, 64'h2D65686361632D69, 64'h657A69732D6B63, 64'h6F6C622D65686361, 64'h632D6900746E756F, 64'h632D746E696F706B, 64'h616572622D636578, 64'h652D657261776472, 64'h616800657079745F, 64'h6563697665640065, 64'h7A69732D626C742D, 64'h6400737465732D62, 64'h6C742D6400657A69, 64'h732D65686361632D, 64'h6400737465732D65, 64'h686361632D640065, 64'h7A69732D6B636F6C, 64'h622D65686361632D, 64'h640079636E657571, 64'h6572662D6B636F6C, 64'h630079636E657571, 64'h6572662D65736162, 64'h656D697400687461, 64'h702D74756F647473, 64'h306C6169726573, 64'h6C65646F6D0065, 64'h6C62697461706D6F, 64'h6300736C6C65632D, 64'h657A69732300736C, 64'h6C65632D73736572, 64'h6464612309000000, 64'h200000002000000, 64'h2000000006C6F72, 64'h746E6F63A8010000, 64'h800000003000000, 64'h10000000001100, 64'h2E01000008000000, 64'h300000000000000, 64'h3030303031314072, 64'h65747465732D7465, 64'h7365722D656C6974, 64'h100000002000000, 64'h6C6F72746E6F63, 64'hA801000008000000, 64'h300000000100000, 64'h2102E010000, 64'h800000003000000, 64'h100000055020000, 64'h400000003000000, 64'h600000044020000, 64'h400000003000000, 64'h30747261, 64'h752C657669666973, 64'h1B0000000D000000, 64'h300000005000000, 64'h3D02000004000000, 64'h300000000303030, 64'h3032303031406C61, 64'h6972657301000000, 
64'h2000000006B636F, 64'h6C632D6465786966, 64'h1B0000000C000000, 64'h300000000006B63, 64'h6F6C635F73756273, 64'hEB0100000B000000, 64'h30000000065CD1D, 64'h5300000004000000, 64'h300000000000000, 64'hDE01000004000000, 64'h300000000006B63, 64'h6F6C635F73756273, 64'h100000002000000, 64'h6D656DA8010000, 64'h400000003000000, 64'h10000000100, 64'h2E01000008000000, 64'h300000000306D6F, 64'h722C657669666973, 64'h1B0000000C000000, 64'h300000000000030, 64'h30303031406D6F72, 64'h100000002000000, 64'h500000099010000, 64'h400000003000000, 64'h6B636F6C632D64, 64'h657869661B000000, 64'hC00000003000000, 64'h6B636F6C635F, 64'h73756270EB010000, 64'hB00000003000000, 64'h65CD1D53000000, 64'h400000003000000, 64'hDE010000, 64'h400000003000000, 64'h6B636F6C635F, 64'h7375627001000000, 64'h2000000006B636F, 64'h6C632D6465786966, 64'h1B0000000C000000, 64'h300000000006B63, 64'h6F6C635F7375626D, 64'hEB0100000B000000, 64'h30000000065CD1D, 64'h5300000004000000, 64'h300000000000000, 64'hDE01000004000000, 64'h300000000006B63, 64'h6F6C635F7375626D, 64'h100000002000000, 64'h600000099010000, 64'h400000003000000, 64'h100000032020000, 64'h400000003000000, 64'h10000001F020000, 64'h400000003000000, 64'h6C6F72746E6F63, 64'hA801000008000000, 64'h300000000000004, 64'hC2E010000, 64'h800000003000000, 64'h900000004000000, 64'hB00000004000000, 64'hFE01000010000000, 64'h300000084010000, 64'h3000000, 64'h3063696C702C76, 64'h637369721B000000, 64'hC00000003000000, 64'h100000073010000, 64'h400000003000000, 64'h30303030, 64'h3030634072656C6C, 64'h6F72746E6F632D74, 64'h7075727265746E69, 64'h100000002000000, 64'h6B636F6C632D64, 64'h657869661B000000, 64'hC00000003000000, 64'h6B636F6C635F, 64'h73756266EB010000, 64'hB00000003000000, 64'h65CD1D53000000, 64'h400000003000000, 64'hDE010000, 64'h400000003000000, 64'h6B636F6C635F, 64'h7375626601000000, 64'h200000000100000, 64'h3000002E010000, 64'h800000003000000, 64'h30726F7272, 64'h652C657669666973, 64'h1B0000000E000000, 64'h300000000000030, 64'h3030334065636976, 64'h65642D726F727265, 64'h100000002000000, 64'h6C6F72746E6F63, 64'hA801000008000000, 64'h300000000100000, 64'h2E010000, 64'h800000003000000, 64'hFFFF000004000000, 64'hFE01000008000000, 64'h300000000000000, 64'h6761746A12020000, 64'h500000003000000, 64'h3331302D, 64'h67756265642C7663, 64'h736972003331302D, 64'h67756265642C6576, 64'h696669731B000000, 64'h2100000003000000, 64'h304072656C6C, 64'h6F72746E6F632D67, 64'h7562656401000000, 64'h2000000006C6F72, 64'h746E6F63A8010000, 64'h800000003000000, 64'h10000000001000, 64'h2E01000008000000, 64'h300000000003030, 64'h3030303140726574, 64'h61672D6B636F6C63, 64'h100000002000000, 64'h6C6F72746E6F63, 64'hA801000008000000, 64'h300000000000100, 64'h22E010000, 64'h800000003000000, 64'h700000004000000, 64'h300000004000000, 64'hFE01000010000000, 64'h300000000000000, 64'h30746E696C632C76, 64'h637369721B000000, 64'hD00000003000000, 64'h3030303030, 64'h303240746E696C63, 64'h100000002000000, 64'h6B636F6C632D64, 64'h657869661B000000, 64'hC00000003000000, 64'h6B636F6C635F, 64'h73756263EB010000, 64'hB00000003000000, 64'h65CD1D53000000, 64'h400000003000000, 64'hDE010000, 64'h400000003000000, 64'h6B636F6C635F, 64'h7375626301000000, 64'h200000001000000, 64'h9901000004000000, 64'h300000007000000, 64'hCC01000004000000, 64'h3000000006C6F72, 64'h746E6F63A8010000, 64'h800000003000000, 64'h10000000000102, 64'h2E01000008000000, 64'h300000003000000, 64'h20000001D010000, 64'h800000003000000, 64'h65686361, 64'h6300306568636163, 64'h65766973756C636E, 64'h692C657669666973, 64'h1B0000001D000000, 64'h3000000BE010000, 64'h3000000, 
64'h80085000000, 64'h400000003000000, 64'h4000078000000, 64'h400000003000000, 64'h2000000B2010000, 64'h400000003000000, 64'h4000000065000000, 64'h400000003000000, 64'h30303030, 64'h3130324072656C6C, 64'h6F72746E6F632D65, 64'h6863616301000000, 64'h2000000006C6F72, 64'h746E6F63A8010000, 64'h800000003000000, 64'h10000000100000, 64'h2E01000008000000, 64'h300000000000030, 64'h303031406765722D, 64'h737365726464612D, 64'h746F6F6201000000, 64'hA101000000000000, 64'h300000000737562, 64'h2D656C706D697300, 64'h636F732D64726179, 64'h706968632C726162, 64'h2D6263751B000000, 64'h2000000003000000, 64'h10000000F000000, 64'h400000003000000, 64'h100000000000000, 64'h400000003000000, 64'h636F7301000000, 64'h200000002000000, 64'h9901000004000000, 64'h300000000000010, 64'h802E010000, 64'h800000003000000, 64'h79726F6D656D, 64'hA600000007000000, 64'h300000000303030, 64'h3030303038407972, 64'h6F6D656D01000000, 64'h200000003000000, 64'h9901000004000000, 64'h300000000000000, 64'h64656C6261736964, 64'h6201000009000000, 64'h300000000000100, 64'h82E010000, 64'h800000003000000, 64'h79726F6D656D, 64'hA600000007000000, 64'h300000000003030, 64'h3030303038407972, 64'h6F6D656D01000000, 64'h200000000000030, 64'h666974682C626375, 64'h1B0000000A000000, 64'h300000000000000, 64'h6669746801000000, 64'h200000002000000, 64'h200000004000000, 64'h9901000004000000, 64'h300000084010000, 64'h3000000, 64'h63746E692D75, 64'h70632C7663736972, 64'h1B0000000F000000, 64'h300000001000000, 64'h7301000004000000, 64'h300000000000000, 64'h72656C6C6F72746E, 64'h6F632D7470757272, 64'h65746E6901000000, 64'h6901000000000000, 64'h300000020A10700, 64'h4000000004000000, 64'h300000000000000, 64'h79616B6F62010000, 64'h500000003000000, 64'h800000051010000, 64'h400000003000000, 64'h40000003C010000, 64'h400000003000000, 64'h6262767A5F, 64'h73627A5F62627A5F, 64'h61627A5F68667A5F, 64'h6866767A5F643436, 64'h65767A5F62383231, 64'h6C767A5F6D706869, 64'h7A5F6965636E6566, 64'h697A5F727363697A, 64'h7662636466616D69, 64'h3436767232010000, 64'h4A00000003000000, 64'h2E010000, 64'h400000003000000, 64'h10000001D010000, 64'h400000003000000, 64'h393376732C76, 64'h6373697214010000, 64'hB00000003000000, 64'h2000000009010000, 64'h400000003000000, 64'h1000000FE000000, 64'h400000003000000, 64'h800000F1000000, 64'h400000003000000, 64'h40000000E4000000, 64'h400000003000000, 64'h40000000D1000000, 64'h400000003000000, 64'h1000000B2000000, 64'h400000003000000, 64'h757063A6000000, 64'h400000003000000, 64'h200000009B000000, 64'h400000003000000, 64'h100000090000000, 64'h400000003000000, 64'h80000083000000, 64'h400000003000000, 64'h4000000076000000, 64'h400000003000000, 64'h4000000063000000, 64'h400000003000000, 64'h76637369, 64'h72003074656B636F, 64'h722C657669666973, 64'h1B00000015000000, 64'h300000000000000, 64'h5300000004000000, 64'h300000000000030, 64'h4075706301000000, 64'h20A1070040000000, 64'h400000003000000, 64'hF000000, 64'h400000003000000, 64'h100000000000000, 64'h400000003000000, 64'h73757063, 64'h100000002000000, 64'h30303030, 64'h32303031406C6169, 64'h7265732F636F732F, 64'h3400000015000000, 64'h300000000006E65, 64'h736F686301000000, 64'h200000000000000, 64'h3030303032303031, 64'h406C61697265732F, 64'h636F732F2C000000, 64'h1500000003000000, 64'h73657361696C61, 64'h100000000000000, 64'h6472617970696863, 64'h2C7261622D626375, 64'h2600000011000000, 64'h300000000000000, 64'h7665642D64726179, 64'h706968632C726162, 64'h2D6263751B000000, 64'h1500000003000000, 64'h10000000F000000, 64'h400000003000000, 64'h100000000000000, 64'h400000003000000, 64'h1000000, 64'h0, 64'h0, 
64'h8C0B000060020000, 64'h10000000, 64'h1100000028000000, 64'hC40B000038000000, 64'h240E0000EDFE0DD0, 64'h1330200073, 64'h3006307308000613, 64'h185859300000597, 64'hF140257334151073, 64'h5350300001537, 64'h5A02300B505B3, 64'h251513FE029EE3, 64'h5A283F81FF06F, 64'h0, 64'h0, 64'h2C0006F, 64'hFE069AE3FFC62683, 64'h46061300D62023, 64'h10069300458613, 64'h380006F00050463, 64'hF1402573020005B7, 64'hFFDFF06F, 64'h1050007330052073, 64'h3045107300800513, 64'h3445307322200513, 64'h3030107300028863, 64'h12F2934122D293, 64'h301022F330551073, 64'h405051300000517}; TLMonitor_54 monitor ( // @[Nodes.scala:27:25] .clock (clock), .reset (reset), .io_in_a_ready (auto_in_d_ready), .io_in_a_valid (auto_in_a_valid), .io_in_a_bits_opcode (auto_in_a_bits_opcode), .io_in_a_bits_param (auto_in_a_bits_param), .io_in_a_bits_size (auto_in_a_bits_size), .io_in_a_bits_source (auto_in_a_bits_source), .io_in_a_bits_address (auto_in_a_bits_address), .io_in_a_bits_mask (auto_in_a_bits_mask), .io_in_a_bits_corrupt (auto_in_a_bits_corrupt), .io_in_d_ready (auto_in_d_ready), .io_in_d_valid (auto_in_a_valid), .io_in_d_bits_size (auto_in_a_bits_size), .io_in_d_bits_source (auto_in_a_bits_source) ); // @[Nodes.scala:27:25] assign auto_in_a_ready = auto_in_d_ready; // @[BootROM.scala:41:9] assign auto_in_d_valid = auto_in_a_valid; // @[BootROM.scala:41:9] assign auto_in_d_bits_size = auto_in_a_bits_size; // @[BootROM.scala:41:9] assign auto_in_d_bits_source = auto_in_a_bits_source; // @[BootROM.scala:41:9] assign auto_in_d_bits_data = (|(auto_in_a_bits_address[15:12])) ? 64'h0 : _GEN[auto_in_a_bits_address[11:3]]; // @[BootROM.scala:41:9, :55:34, :56:64, :57:{47,53}] endmodule
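The generated read path above reduces to one combinational lookup: any address with a non-zero value in bits [15:12] falls outside the ROM image and returns 64'h0, otherwise bits [11:3] select one of up to 512 64-bit words of _GEN. A minimal Chisel sketch of that indexing scheme; the module name, port widths, and 512-entry capacity are illustrative assumptions, not taken from BootROM.scala:

import chisel3._

// Hypothetical stand-in for the generated lookup: up to 512 64-bit words,
// combinationally indexed by addr[11:3]; out-of-range addresses read as 0.
class TinyBootRom(contents: Seq[BigInt]) extends Module {
  require(contents.nonEmpty && contents.size <= 512)
  val io = IO(new Bundle {
    val addr = Input(UInt(16.W))
    val data = Output(UInt(64.W))
  })
  val rom = VecInit(contents.padTo(512, BigInt(0)).map(_.U(64.W)))
  io.data := Mux(io.addr(15, 12).orR, 0.U, rom(io.addr(11, 3)))
}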
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerShiftReg_w1_d3_i0_64( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_96 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
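The companion object in the Chisel source above is the intended entry point for this module: AsyncResetSynchronizerShiftReg.apply wraps the UInt-based AbstractPipelineReg interface and returns data of the caller's type. A hedged usage sketch in which only the apply call comes from the source; the surrounding IrqSync module is hypothetical:

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Bring an asynchronous interrupt line into the local clock domain through a
// 3-deep synchronizer chain that resets to 0 -- the same parameters that
// elaborate to an AsyncResetSynchronizerShiftReg_w1_d3_i0 instance like the
// module above.
class IrqSync extends Module {
  val io = IO(new Bundle {
    val irq_async = Input(Bool())  // signal crossing from another clock domain
    val irq_sync  = Output(Bool()) // synchronized copy in this domain
  })
  io.irq_sync := AsyncResetSynchronizerShiftReg(io.irq_async, sync = 3, init = 0, name = Some("irq_sync"))
}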
Generate the Verilog code corresponding to the following Chisel files. File Tile.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ import Util._ /** * A Tile is a purely combinational 2D array of passThrough PEs. * a, b, s, and in_propag are broadcast across the entire array and are passed through to the Tile's outputs * @param width The data width of each PE in bits * @param rows Number of PEs on each row * @param columns Number of PEs on each column */ class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module { val io = IO(new Bundle { val in_a = Input(Vec(rows, inputType)) val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it val in_d = Input(Vec(columns, outputType)) val in_control = Input(Vec(columns, new PEControl(accType))) val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val in_last = Input(Vec(columns, Bool())) val out_a = Output(Vec(rows, inputType)) val out_c = Output(Vec(columns, outputType)) val out_b = Output(Vec(columns, outputType)) val out_control = Output(Vec(columns, new PEControl(accType))) val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val out_last = Output(Vec(columns, Bool())) val in_valid = Input(Vec(columns, Bool())) val out_valid = Output(Vec(columns, Bool())) val bad_dataflow = Output(Bool()) }) import ev._ val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls))) val tileT = tile.transpose // TODO: abstract hori/vert broadcast, all these connections look the same // Broadcast 'a' horizontally across the Tile for (r <- 0 until rows) { tile(r).foldLeft(io.in_a(r)) { case (in_a, pe) => pe.io.in_a := in_a pe.io.out_a } } // Broadcast 'b' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_b(c)) { case (in_b, pe) => pe.io.in_b := (if (tree_reduction) in_b.zero else in_b) pe.io.out_b } } // Broadcast 'd' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_d(c)) { case (in_d, pe) => pe.io.in_d := in_d pe.io.out_c } } // Broadcast 'control' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_control(c)) { case (in_ctrl, pe) => pe.io.in_control := in_ctrl pe.io.out_control } } // Broadcast 'garbage' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_valid(c)) { case (v, pe) => pe.io.in_valid := v pe.io.out_valid } } // Broadcast 'id' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_id(c)) { case (id, pe) => pe.io.in_id := id pe.io.out_id } } // Broadcast 'last' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_last(c)) { case (last, pe) => pe.io.in_last := last pe.io.out_last } } // Drive the Tile's bottom IO for (c <- 0 until columns) { io.out_c(c) := tile(rows-1)(c).io.out_c io.out_control(c) := tile(rows-1)(c).io.out_control io.out_id(c) := tile(rows-1)(c).io.out_id io.out_last(c) := tile(rows-1)(c).io.out_last io.out_valid(c) := tile(rows-1)(c).io.out_valid io.out_b(c) := { if (tree_reduction) { val prods = tileT(c).map(_.io.out_b) accumulateTree(prods :+ io.in_b(c)) } else { tile(rows - 1)(c).io.out_b } } } io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_) // Drive the Tile's right IO for (r <- 0 until rows) { io.out_a(r) 
:= tile(r)(columns-1).io.out_a } }
module Tile_69( // @[Tile.scala:16:7] input clock, // @[Tile.scala:16:7] input reset, // @[Tile.scala:16:7] input [7:0] io_in_a_0, // @[Tile.scala:17:14] input [19:0] io_in_b_0, // @[Tile.scala:17:14] input [19:0] io_in_d_0, // @[Tile.scala:17:14] input io_in_control_0_dataflow, // @[Tile.scala:17:14] input io_in_control_0_propagate, // @[Tile.scala:17:14] input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14] input [2:0] io_in_id_0, // @[Tile.scala:17:14] input io_in_last_0, // @[Tile.scala:17:14] output [7:0] io_out_a_0, // @[Tile.scala:17:14] output [19:0] io_out_c_0, // @[Tile.scala:17:14] output [19:0] io_out_b_0, // @[Tile.scala:17:14] output io_out_control_0_dataflow, // @[Tile.scala:17:14] output io_out_control_0_propagate, // @[Tile.scala:17:14] output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14] output [2:0] io_out_id_0, // @[Tile.scala:17:14] output io_out_last_0, // @[Tile.scala:17:14] input io_in_valid_0, // @[Tile.scala:17:14] output io_out_valid_0, // @[Tile.scala:17:14] output io_bad_dataflow // @[Tile.scala:17:14] ); wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7] wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7] wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7] wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7] wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7] wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7] wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7] wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7] wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7] wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7] wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7] wire io_out_control_0_propagate_0; // @[Tile.scala:16:7] wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7] wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7] wire io_out_last_0_0; // @[Tile.scala:16:7] wire io_out_valid_0_0; // @[Tile.scala:16:7] wire io_bad_dataflow_0; // @[Tile.scala:16:7] PE_325 tile_0_0 ( // @[Tile.scala:42:44] .clock (clock), .reset (reset), .io_in_a (io_in_a_0_0), // @[Tile.scala:16:7] .io_in_b (io_in_b_0_0), // @[Tile.scala:16:7] .io_in_d (io_in_d_0_0), // @[Tile.scala:16:7] .io_out_a (io_out_a_0_0), .io_out_b (io_out_b_0_0), .io_out_c (io_out_c_0_0), .io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7] .io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7] .io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7] .io_out_control_dataflow (io_out_control_0_dataflow_0), .io_out_control_propagate (io_out_control_0_propagate_0), .io_out_control_shift (io_out_control_0_shift_0), .io_in_id (io_in_id_0_0), // @[Tile.scala:16:7] .io_out_id (io_out_id_0_0), .io_in_last (io_in_last_0_0), // @[Tile.scala:16:7] .io_out_last (io_out_last_0_0), .io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7] .io_out_valid (io_out_valid_0_0), .io_bad_dataflow (io_bad_dataflow_0) ); // @[Tile.scala:42:44] assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7] assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7] assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7] assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7] assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7] assign io_out_control_0_shift = io_out_control_0_shift_0; // 
@[Tile.scala:16:7] assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7] assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7] assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7] assign io_bad_dataflow = io_bad_dataflow_0; // @[Tile.scala:16:7] endmodule
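Every broadcast loop in Tile.scala relies on the same foldLeft idiom: the accumulator carries the previous PE's output, each step wires it into the next PE's input and yields that PE's output, and the final value drives the Tile's edge IO. A stripped-down sketch of just that chaining pattern; PassThrough and Chain are illustrative names, not part of Tile.scala:

import chisel3._

class PassThrough extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val out = Output(UInt(8.W))
  })
  io.out := io.in
}

class Chain(n: Int) extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val out = Output(UInt(8.W))
  })
  val stages = Seq.fill(n)(Module(new PassThrough))
  // Each stage's input is the previous stage's output; the fold returns the
  // last output, mirroring the in_a/out_a threading across a Tile row.
  io.out := stages.foldLeft(io.in) { case (prev, pe) =>
    pe.io.in := prev
    pe.io.out
  }
}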
Generate the Verilog code corresponding to the following Chisel files. File RoundAnyRawFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util.Fill import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundAnyRawFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int, options: Int ) extends RawModule { override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(inExpWidth, inSigWidth)) // (allowed exponent range has limits) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0) val effectiveInSigWidth = if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1 val neverUnderflows = ((options & (flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact) ) != 0) || (inExpWidth < outExpWidth) val neverOverflows = ((options & flRoundOpt_neverOverflows) != 0) || (inExpWidth < outExpWidth) val outNaNExp = BigInt(7)<<(outExpWidth - 2) val outInfExp = BigInt(6)<<(outExpWidth - 2) val outMaxFiniteExp = outInfExp - 1 val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2 val outMinNonzeroExp = outMinNormExp - outSigWidth + 1 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundingMode_near_even = (io.roundingMode === round_near_even) val roundingMode_minMag = (io.roundingMode === round_minMag) val roundingMode_min = (io.roundingMode === round_min) val roundingMode_max = (io.roundingMode === round_max) val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag) val roundingMode_odd = (io.roundingMode === round_odd) val roundMagUp = (roundingMode_min && io.in.sign) || (roundingMode_max && ! 
io.in.sign) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sAdjustedExp = if (inExpWidth < outExpWidth) (io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S )(outExpWidth, 0).zext else if (inExpWidth == outExpWidth) io.in.sExp else io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S val adjustedSig = if (inSigWidth <= outSigWidth + 2) io.in.sig<<(outSigWidth - inSigWidth + 2) else (io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ## io.in.sig(inSigWidth - outSigWidth - 2, 0).orR ) val doShiftSigDown1 = if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2) val common_expOut = Wire(UInt((outExpWidth + 1).W)) val common_fractOut = Wire(UInt((outSigWidth - 1).W)) val common_overflow = Wire(Bool()) val common_totalUnderflow = Wire(Bool()) val common_underflow = Wire(Bool()) val common_inexact = Wire(Bool()) if ( neverOverflows && neverUnderflows && (effectiveInSigWidth <= outSigWidth) ) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1 common_fractOut := Mux(doShiftSigDown1, adjustedSig(outSigWidth + 1, 3), adjustedSig(outSigWidth, 2) ) common_overflow := false.B common_totalUnderflow := false.B common_underflow := false.B common_inexact := false.B } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundMask = if (neverUnderflows) 0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W) else (lowMask( sAdjustedExp(outExpWidth, 0), outMinNormExp - outSigWidth - 1, outMinNormExp ) | doShiftSigDown1) ## 3.U(2.W) val shiftedRoundMask = 0.U(1.W) ## roundMask>>1 val roundPosMask = ~shiftedRoundMask & roundMask val roundPosBit = (adjustedSig & roundPosMask).orR val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR val anyRound = roundPosBit || anyRoundExtra val roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && roundPosBit) || (roundMagUp && anyRound) val roundedSig: Bits = Mux(roundIncr, (((adjustedSig | roundMask)>>2) +& 1.U) & ~Mux(roundingMode_near_even && roundPosBit && ! anyRoundExtra, roundMask>>1, 0.U((outSigWidth + 2).W) ), (adjustedSig & ~roundMask)>>2 | Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U) ) //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? 
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext common_expOut := sRoundedExp(outExpWidth, 0) common_fractOut := Mux(doShiftSigDown1, roundedSig(outSigWidth - 1, 1), roundedSig(outSigWidth - 2, 0) ) common_overflow := (if (neverOverflows) false.B else //*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?: (sRoundedExp>>(outExpWidth - 1) >= 3.S)) common_totalUnderflow := (if (neverUnderflows) false.B else //*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?: (sRoundedExp < outMinNonzeroExp.S)) val unboundedRange_roundPosBit = Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1)) val unboundedRange_anyRound = (doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR val unboundedRange_roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && unboundedRange_roundPosBit) || (roundMagUp && unboundedRange_anyRound) val roundCarry = Mux(doShiftSigDown1, roundedSig(outSigWidth + 1), roundedSig(outSigWidth) ) common_underflow := (if (neverUnderflows) false.B else common_totalUnderflow || //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? (anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) && Mux(doShiftSigDown1, roundMask(3), roundMask(2)) && ! ((io.detectTininess === tininess_afterRounding) && ! Mux(doShiftSigDown1, roundMask(4), roundMask(3) ) && roundCarry && roundPosBit && unboundedRange_roundIncr))) common_inexact := common_totalUnderflow || anyRound } //------------------------------------------------------------------------ //------------------------------------------------------------------------ val isNaNOut = io.invalidExc || io.in.isNaN val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero val overflow = commonCase && common_overflow val underflow = commonCase && common_underflow val inexact = overflow || (commonCase && common_inexact) val overflow_roundMagUp = roundingMode_near_even || roundingMode_near_maxMag || roundMagUp val pegMinNonzeroMagOut = commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd) val pegMaxFiniteMagOut = overflow && ! 
overflow_roundMagUp val notNaN_isInfOut = notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp) val signOut = Mux(isNaNOut, false.B, io.in.sign) val expOut = (common_expOut & ~Mux(io.in.isZero || common_totalUnderflow, (BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMinNonzeroMagOut, ~outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMaxFiniteMagOut, (BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W), 0.U ) & ~Mux(notNaN_isInfOut, (BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U )) | Mux(pegMinNonzeroMagOut, outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) | Mux(pegMaxFiniteMagOut, outMaxFiniteExp.U((outExpWidth + 1).W), 0.U ) | Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) | Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U) val fractOut = Mux(isNaNOut || io.in.isZero || common_totalUnderflow, Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U), common_fractOut ) | Fill(outSigWidth - 1, pegMaxFiniteMagOut) io.out := signOut ## expOut ## fractOut io.exceptionFlags := io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int) extends RawModule { override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(expWidth, sigWidth + 2)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( expWidth, sigWidth + 2, expWidth, sigWidth, options)) roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc roundAnyRawFNToRecFN.io.in := io.in roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags }
module RoundAnyRawFNToRecFN_ie7_is64_oe5_os11_7( // @[RoundAnyRawFNToRecFN.scala:48:5] input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16] input [8:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16] input [64:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16] input [2:0] io_roundingMode, // @[RoundAnyRawFNToRecFN.scala:58:16] output [16:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16] output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16] ); wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [8:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [64:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [2:0] io_roundingMode_0 = io_roundingMode; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [11:0] _roundMask_T = 12'h0; // @[RoundAnyRawFNToRecFN.scala:153:36] wire [5:0] _expOut_T_4 = 6'h37; // @[RoundAnyRawFNToRecFN.scala:258:19] wire [13:0] roundMask = 14'h3; // @[RoundAnyRawFNToRecFN.scala:153:55] wire [14:0] _shiftedRoundMask_T = 15'h3; // @[RoundAnyRawFNToRecFN.scala:162:41] wire [13:0] shiftedRoundMask = 14'h1; // @[RoundAnyRawFNToRecFN.scala:162:53] wire [13:0] _roundPosMask_T = 14'h3FFE; // @[RoundAnyRawFNToRecFN.scala:163:28] wire [13:0] roundPosMask = 14'h2; // @[RoundAnyRawFNToRecFN.scala:163:46] wire [13:0] _roundedSig_T_10 = 14'h3FFC; // @[RoundAnyRawFNToRecFN.scala:180:32] wire [12:0] _roundedSig_T_6 = 13'h1; // @[RoundAnyRawFNToRecFN.scala:177:35, :181:67] wire [12:0] _roundedSig_T_14 = 13'h1; // @[RoundAnyRawFNToRecFN.scala:177:35, :181:67] wire [5:0] _expOut_T_6 = 6'h3F; // @[RoundAnyRawFNToRecFN.scala:257:14] wire [5:0] _expOut_T_5 = 6'h0; // @[RoundAnyRawFNToRecFN.scala:257:18] wire [5:0] _expOut_T_14 = 6'h0; // @[RoundAnyRawFNToRecFN.scala:269:16] wire [5:0] _expOut_T_20 = 6'h0; // @[RoundAnyRawFNToRecFN.scala:278:16] wire [9:0] _fractOut_T_2 = 10'h0; // @[RoundAnyRawFNToRecFN.scala:281:16] wire [1:0] _io_exceptionFlags_T = 2'h0; // @[RoundAnyRawFNToRecFN.scala:288:23] wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5] wire _commonCase_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:237:22] wire _commonCase_T_1 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:237:36] wire _commonCase_T_2 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:237:33] wire io_invalidExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isNaN = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isInf = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire common_totalUnderflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:125:37] wire common_underflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:126:37] wire _unboundedRange_anyRound_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:205:30] wire isNaNOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:235:34] wire notNaN_isSpecialInfOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:236:49] wire underflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:239:32] wire _pegMinNonzeroMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:20] wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45] wire _expOut_T = io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :253:32] wire _fractOut_T = io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :280:22] wire signOut = io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :250:22] wire [16:0] _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:286:33] wire 
[4:0] _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:288:66] wire [16:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire roundingMode_near_even = io_roundingMode_0 == 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :90:53] wire roundingMode_minMag = io_roundingMode_0 == 3'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :91:53] wire roundingMode_min = io_roundingMode_0 == 3'h2; // @[RoundAnyRawFNToRecFN.scala:48:5, :92:53] wire roundingMode_max = io_roundingMode_0 == 3'h3; // @[RoundAnyRawFNToRecFN.scala:48:5, :93:53] wire roundingMode_near_maxMag = io_roundingMode_0 == 3'h4; // @[RoundAnyRawFNToRecFN.scala:48:5, :94:53] wire roundingMode_odd = io_roundingMode_0 == 3'h6; // @[RoundAnyRawFNToRecFN.scala:48:5, :95:53] wire _roundMagUp_T = roundingMode_min & io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :92:53, :98:27] wire _roundMagUp_T_1 = ~io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :98:66] wire _roundMagUp_T_2 = roundingMode_max & _roundMagUp_T_1; // @[RoundAnyRawFNToRecFN.scala:93:53, :98:{63,66}] wire roundMagUp = _roundMagUp_T | _roundMagUp_T_2; // @[RoundAnyRawFNToRecFN.scala:98:{27,42,63}] wire [9:0] sAdjustedExp = {io_in_sExp_0[8], io_in_sExp_0} - 10'h60; // @[RoundAnyRawFNToRecFN.scala:48:5, :110:24] wire [12:0] _adjustedSig_T = io_in_sig_0[64:52]; // @[RoundAnyRawFNToRecFN.scala:48:5, :116:23] wire [51:0] _adjustedSig_T_1 = io_in_sig_0[51:0]; // @[RoundAnyRawFNToRecFN.scala:48:5, :117:26] wire _adjustedSig_T_2 = |_adjustedSig_T_1; // @[RoundAnyRawFNToRecFN.scala:117:{26,60}] wire [13:0] adjustedSig = {_adjustedSig_T, _adjustedSig_T_2}; // @[RoundAnyRawFNToRecFN.scala:116:{23,66}, :117:60] wire [5:0] _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:187:37] wire [5:0] common_expOut; // @[RoundAnyRawFNToRecFN.scala:122:31] wire [9:0] _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:189:16] wire [9:0] common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31] wire _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:196:50] wire common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37] wire _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:230:49] wire common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37] wire [13:0] _roundPosBit_T = adjustedSig & 14'h2; // @[RoundAnyRawFNToRecFN.scala:116:66, :163:46, :164:40] wire roundPosBit = |_roundPosBit_T; // @[RoundAnyRawFNToRecFN.scala:164:{40,56}] wire [13:0] _anyRoundExtra_T = adjustedSig & 14'h1; // @[RoundAnyRawFNToRecFN.scala:116:66, :162:53, :165:42] wire anyRoundExtra = |_anyRoundExtra_T; // @[RoundAnyRawFNToRecFN.scala:165:{42,62}] wire anyRound = roundPosBit | anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:164:56, :165:62, :166:36] assign _common_inexact_T = anyRound; // @[RoundAnyRawFNToRecFN.scala:166:36, :230:49] wire _GEN = roundingMode_near_even | roundingMode_near_maxMag; // @[RoundAnyRawFNToRecFN.scala:90:53, :94:53, :169:38] wire _roundIncr_T; // @[RoundAnyRawFNToRecFN.scala:169:38] assign _roundIncr_T = _GEN; // @[RoundAnyRawFNToRecFN.scala:169:38] wire _unboundedRange_roundIncr_T; // @[RoundAnyRawFNToRecFN.scala:207:38] assign _unboundedRange_roundIncr_T = _GEN; // @[RoundAnyRawFNToRecFN.scala:169:38, :207:38] wire _overflow_roundMagUp_T; // @[RoundAnyRawFNToRecFN.scala:243:32] assign _overflow_roundMagUp_T = _GEN; // @[RoundAnyRawFNToRecFN.scala:169:38, :243:32] wire _roundIncr_T_1 = _roundIncr_T & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :169:{38,67}] wire _roundIncr_T_2 = roundMagUp & anyRound; // 
@[RoundAnyRawFNToRecFN.scala:98:42, :166:36, :171:29] wire roundIncr = _roundIncr_T_1 | _roundIncr_T_2; // @[RoundAnyRawFNToRecFN.scala:169:67, :170:31, :171:29] wire [13:0] _roundedSig_T = adjustedSig | 14'h3; // @[RoundAnyRawFNToRecFN.scala:116:66, :153:55, :174:32] wire [11:0] _roundedSig_T_1 = _roundedSig_T[13:2]; // @[RoundAnyRawFNToRecFN.scala:174:{32,44}] wire [12:0] _roundedSig_T_2 = {1'h0, _roundedSig_T_1} + 13'h1; // @[RoundAnyRawFNToRecFN.scala:174:{44,49}, :177:35, :181:67] wire _roundedSig_T_3 = roundingMode_near_even & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:90:53, :164:56, :175:49] wire _roundedSig_T_4 = ~anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:165:62, :176:30] wire _roundedSig_T_5 = _roundedSig_T_3 & _roundedSig_T_4; // @[RoundAnyRawFNToRecFN.scala:175:{49,64}, :176:30] wire [12:0] _roundedSig_T_7 = {12'h0, _roundedSig_T_5}; // @[RoundAnyRawFNToRecFN.scala:175:{25,64}] wire [12:0] _roundedSig_T_8 = ~_roundedSig_T_7; // @[RoundAnyRawFNToRecFN.scala:175:{21,25}] wire [12:0] _roundedSig_T_9 = _roundedSig_T_2 & _roundedSig_T_8; // @[RoundAnyRawFNToRecFN.scala:174:{49,57}, :175:21] wire [13:0] _roundedSig_T_11 = adjustedSig & 14'h3FFC; // @[RoundAnyRawFNToRecFN.scala:116:66, :180:{30,32}] wire [11:0] _roundedSig_T_12 = _roundedSig_T_11[13:2]; // @[RoundAnyRawFNToRecFN.scala:180:{30,43}] wire _roundedSig_T_13 = roundingMode_odd & anyRound; // @[RoundAnyRawFNToRecFN.scala:95:53, :166:36, :181:42] wire [12:0] _roundedSig_T_15 = {12'h0, _roundedSig_T_13}; // @[RoundAnyRawFNToRecFN.scala:181:{24,42}] wire [12:0] _roundedSig_T_16 = {1'h0, _roundedSig_T_12} | _roundedSig_T_15; // @[RoundAnyRawFNToRecFN.scala:180:{43,47}, :181:24] wire [12:0] roundedSig = roundIncr ? _roundedSig_T_9 : _roundedSig_T_16; // @[RoundAnyRawFNToRecFN.scala:170:31, :173:16, :174:57, :180:47] wire [1:0] _sRoundedExp_T = roundedSig[12:11]; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:54] wire [2:0] _sRoundedExp_T_1 = {1'h0, _sRoundedExp_T}; // @[RoundAnyRawFNToRecFN.scala:185:{54,76}] wire [10:0] sRoundedExp = {sAdjustedExp[9], sAdjustedExp} + {{8{_sRoundedExp_T_1[2]}}, _sRoundedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:110:24, :185:{40,76}] assign _common_expOut_T = sRoundedExp[5:0]; // @[RoundAnyRawFNToRecFN.scala:185:40, :187:37] assign common_expOut = _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:122:31, :187:37] wire [9:0] _common_fractOut_T = roundedSig[10:1]; // @[RoundAnyRawFNToRecFN.scala:173:16, :190:27] wire [9:0] _common_fractOut_T_1 = roundedSig[9:0]; // @[RoundAnyRawFNToRecFN.scala:173:16, :191:27] assign _common_fractOut_T_2 = _common_fractOut_T_1; // @[RoundAnyRawFNToRecFN.scala:189:16, :191:27] assign common_fractOut = _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:123:31, :189:16] wire [6:0] _common_overflow_T = sRoundedExp[10:4]; // @[RoundAnyRawFNToRecFN.scala:185:40, :196:30] assign _common_overflow_T_1 = $signed(_common_overflow_T) > 7'sh2; // @[RoundAnyRawFNToRecFN.scala:196:{30,50}] assign common_overflow = _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:124:37, :196:50] wire _unboundedRange_roundPosBit_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:45] wire _unboundedRange_anyRound_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:45, :205:44] wire _unboundedRange_roundPosBit_T_1 = adjustedSig[1]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:61] wire unboundedRange_roundPosBit = _unboundedRange_roundPosBit_T_1; // @[RoundAnyRawFNToRecFN.scala:203:{16,61}] wire [1:0] _unboundedRange_anyRound_T_2 = adjustedSig[1:0]; // 
@[RoundAnyRawFNToRecFN.scala:116:66, :205:63] wire _unboundedRange_anyRound_T_3 = |_unboundedRange_anyRound_T_2; // @[RoundAnyRawFNToRecFN.scala:205:{63,70}] wire unboundedRange_anyRound = _unboundedRange_anyRound_T_3; // @[RoundAnyRawFNToRecFN.scala:205:{49,70}] wire _unboundedRange_roundIncr_T_1 = _unboundedRange_roundIncr_T & unboundedRange_roundPosBit; // @[RoundAnyRawFNToRecFN.scala:203:16, :207:{38,67}] wire _unboundedRange_roundIncr_T_2 = roundMagUp & unboundedRange_anyRound; // @[RoundAnyRawFNToRecFN.scala:98:42, :205:49, :209:29] wire unboundedRange_roundIncr = _unboundedRange_roundIncr_T_1 | _unboundedRange_roundIncr_T_2; // @[RoundAnyRawFNToRecFN.scala:207:67, :208:46, :209:29] wire _roundCarry_T = roundedSig[12]; // @[RoundAnyRawFNToRecFN.scala:173:16, :212:27] wire _roundCarry_T_1 = roundedSig[11]; // @[RoundAnyRawFNToRecFN.scala:173:16, :213:27] wire roundCarry = _roundCarry_T_1; // @[RoundAnyRawFNToRecFN.scala:211:16, :213:27] assign common_inexact = _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:127:37, :230:49] wire _commonCase_T_3 = ~io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :237:64] wire commonCase = _commonCase_T_3; // @[RoundAnyRawFNToRecFN.scala:237:{61,64}] wire overflow = commonCase & common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37, :237:61, :238:32] wire _inexact_T = commonCase & common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37, :237:61, :240:43] wire inexact = overflow | _inexact_T; // @[RoundAnyRawFNToRecFN.scala:238:32, :240:{28,43}] wire overflow_roundMagUp = _overflow_roundMagUp_T | roundMagUp; // @[RoundAnyRawFNToRecFN.scala:98:42, :243:{32,60}] wire _pegMinNonzeroMagOut_T_1 = roundMagUp | roundingMode_odd; // @[RoundAnyRawFNToRecFN.scala:95:53, :98:42, :245:60] wire _pegMaxFiniteMagOut_T = ~overflow_roundMagUp; // @[RoundAnyRawFNToRecFN.scala:243:60, :246:42] wire pegMaxFiniteMagOut = overflow & _pegMaxFiniteMagOut_T; // @[RoundAnyRawFNToRecFN.scala:238:32, :246:{39,42}] wire _notNaN_isInfOut_T = overflow & overflow_roundMagUp; // @[RoundAnyRawFNToRecFN.scala:238:32, :243:60, :248:45] wire notNaN_isInfOut = _notNaN_isInfOut_T; // @[RoundAnyRawFNToRecFN.scala:248:{32,45}] wire [5:0] _expOut_T_1 = _expOut_T ? 6'h38 : 6'h0; // @[RoundAnyRawFNToRecFN.scala:253:{18,32}] wire [5:0] _expOut_T_2 = ~_expOut_T_1; // @[RoundAnyRawFNToRecFN.scala:253:{14,18}] wire [5:0] _expOut_T_3 = common_expOut & _expOut_T_2; // @[RoundAnyRawFNToRecFN.scala:122:31, :252:24, :253:14] wire [5:0] _expOut_T_7 = _expOut_T_3; // @[RoundAnyRawFNToRecFN.scala:252:24, :256:17] wire [5:0] _expOut_T_8 = {1'h0, pegMaxFiniteMagOut, 4'h0}; // @[RoundAnyRawFNToRecFN.scala:246:39, :261:18] wire [5:0] _expOut_T_9 = ~_expOut_T_8; // @[RoundAnyRawFNToRecFN.scala:261:{14,18}] wire [5:0] _expOut_T_10 = _expOut_T_7 & _expOut_T_9; // @[RoundAnyRawFNToRecFN.scala:256:17, :260:17, :261:14] wire [5:0] _expOut_T_11 = {2'h0, notNaN_isInfOut, 3'h0}; // @[RoundAnyRawFNToRecFN.scala:248:32, :265:18] wire [5:0] _expOut_T_12 = ~_expOut_T_11; // @[RoundAnyRawFNToRecFN.scala:265:{14,18}] wire [5:0] _expOut_T_13 = _expOut_T_10 & _expOut_T_12; // @[RoundAnyRawFNToRecFN.scala:260:17, :264:17, :265:14] wire [5:0] _expOut_T_15 = _expOut_T_13; // @[RoundAnyRawFNToRecFN.scala:264:17, :268:18] wire [5:0] _expOut_T_16 = pegMaxFiniteMagOut ? 6'h2F : 6'h0; // @[RoundAnyRawFNToRecFN.scala:246:39, :273:16] wire [5:0] _expOut_T_17 = _expOut_T_15 | _expOut_T_16; // @[RoundAnyRawFNToRecFN.scala:268:18, :272:15, :273:16] wire [5:0] _expOut_T_18 = notNaN_isInfOut ? 
6'h30 : 6'h0; // @[RoundAnyRawFNToRecFN.scala:248:32, :277:16] wire [5:0] _expOut_T_19 = _expOut_T_17 | _expOut_T_18; // @[RoundAnyRawFNToRecFN.scala:272:15, :276:15, :277:16] wire [5:0] expOut = _expOut_T_19; // @[RoundAnyRawFNToRecFN.scala:276:15, :277:73] wire _fractOut_T_1 = _fractOut_T; // @[RoundAnyRawFNToRecFN.scala:280:{22,38}] wire [9:0] _fractOut_T_3 = _fractOut_T_1 ? 10'h0 : common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31, :280:{12,38}] wire [9:0] _fractOut_T_4 = {10{pegMaxFiniteMagOut}}; // @[RoundAnyRawFNToRecFN.scala:246:39, :284:13] wire [9:0] fractOut = _fractOut_T_3 | _fractOut_T_4; // @[RoundAnyRawFNToRecFN.scala:280:12, :283:11, :284:13] wire [6:0] _io_out_T = {signOut, expOut}; // @[RoundAnyRawFNToRecFN.scala:250:22, :277:73, :286:23] assign _io_out_T_1 = {_io_out_T, fractOut}; // @[RoundAnyRawFNToRecFN.scala:283:11, :286:{23,33}] assign io_out_0 = _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:48:5, :286:33] wire [2:0] _io_exceptionFlags_T_1 = {2'h0, overflow}; // @[RoundAnyRawFNToRecFN.scala:238:32, :288:41] wire [3:0] _io_exceptionFlags_T_2 = {_io_exceptionFlags_T_1, 1'h0}; // @[RoundAnyRawFNToRecFN.scala:288:{41,53}] assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, inexact}; // @[RoundAnyRawFNToRecFN.scala:240:28, :288:{53,66}] assign io_exceptionFlags_0 = _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:66] assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5] assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5] endmodule
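For the ie7_is64_oe5_os11 instance, the elaboration-time constants in RoundAnyRawFNToRecFN explain the magic exponent values in the generated Verilog (6'h38 for NaN, 6'h30 for infinity, 6'h2F for the largest finite exponent). A quick Scala check of those expressions, assuming outExpWidth = 5 and outSigWidth = 11 as in this instance:

// Evaluated in a Scala REPL; comments show the hex value and where it
// surfaces in the generated module above.
val outExpWidth = 5
val outSigWidth = 11
val outNaNExp        = BigInt(7) << (outExpWidth - 2)        // 0x38 -> 6'h38
val outInfExp        = BigInt(6) << (outExpWidth - 2)        // 0x30 -> 6'h30
val outMaxFiniteExp  = outInfExp - 1                         // 0x2F -> 6'h2F
val outMinNormExp    = (BigInt(1) << (outExpWidth - 1)) + 2  // 0x12
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1       // 0x08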
Generate the Verilog code corresponding to the following Chisel files. File Strided.scala: package barf import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.diplomacy._ import freechips.rocketchip.util._ import freechips.rocketchip.tile._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.subsystem.{CacheBlockBytes} case class SingleStridedPrefetcherParams( history: Int = 4, //Number of times a delta must be seen before prefetching ahead: Int = 1, reset_depth: Int = 8 ) extends CanInstantiatePrefetcher { def desc() = "Single Strided Prefetcher" def instantiate()(implicit p: Parameters) = Module(new StridedPrefetcher(this)(p)) } class StridedPrefetcher(params: SingleStridedPrefetcherParams)(implicit p: Parameters) extends AbstractPrefetcher()(p) { val historyWidth = log2Up(params.history * 2) val block_bits = log2Up(io.request.bits.blockBytes) val ahead_bits = log2Up(params.ahead) val reset_depth_bits = log2Up(params.reset_depth) val s_idle :: s_wait :: s_active :: s_done :: Nil = Enum(4) val state = RegInit(s_idle) val prefetch = Reg(UInt()) val delta = RegInit(0.U) val history_cnt = RegInit(0.U(historyWidth.W)) val last_snoop = Reg(UInt()) val last_snoop_2 = Reg(UInt()) val last_write = Reg(Bool()) val valid_delay = Reg(Bool()) val pref_far_enough = RegInit(false.B) val last_delta = Reg(UInt()) val delta_pos = Reg(Bool()) val last_delta_pos = Reg(Bool()) val reset_counter = RegInit(0.U(reset_depth_bits.W)) val history_reset = Reg(Bool()) last_snoop := Mux(io.snoop.valid, io.snoop.bits.address, last_snoop) last_snoop_2 := Mux(io.snoop.valid, last_snoop, last_snoop_2) last_write := Mux(io.snoop.valid, io.snoop.bits.write, last_write) last_delta := Mux(io.snoop.valid, io.snoop.bits.address - last_snoop, last_delta) last_delta_pos := Mux(io.snoop.valid, io.snoop.bits.address > last_snoop, last_delta_pos) valid_delay := io.snoop.valid history_reset := reset_counter === params.reset_depth.U - 1.U //Saturating counter when (io.snoop.valid) { when(history_reset) { history_cnt := 0.U } .elsewhen (delta === (io.snoop.bits.address - last_snoop) || delta === (io.snoop.bits.address - last_snoop_2)) { history_cnt := Mux(history_cnt === ((params.history * 2) - 1).U, history_cnt, history_cnt + 1.U) } .otherwise { history_cnt := Mux(history_cnt === 0.U, history_cnt, history_cnt - 1.U) } } when(state === s_idle) { reset_counter := 0.U //Begin prefetching when (history_cnt >= params.history.U) { state := s_active delta_pos := last_delta_pos prefetch := last_snoop + last_delta when (delta < io.request.bits.blockBytes.U) { io.snoop.bits.address + (1.U << block_bits.U) } } .otherwise { delta := last_delta } } //auto-activated during history reset when (history_cnt < params.history.U) { state := s_idle } pref_far_enough := prefetch - last_snoop >= (1.U << block_bits.U) io.request.valid := state === s_active && pref_far_enough io.request.bits.address := prefetch io.request.bits.write := last_write when (!pref_far_enough) { prefetch := prefetch + delta } when (state === s_wait) { when ((delta_pos && (prefetch - last_snoop) < (delta << ahead_bits)) || (!delta_pos && (last_snoop - prefetch) < (delta << ahead_bits))) { state := s_active reset_counter := 0.U } .elsewhen(valid_delay) { //snoop didn't trigger prefetch reset_counter := Mux(reset_counter === params.reset_depth.U - 1.U, 0.U, reset_counter + 1.U) } } when (io.request.fire) { prefetch := prefetch + delta when ((delta_pos && (prefetch - last_snoop) < (delta << ahead_bits)) 
|| (!delta_pos && (last_snoop - prefetch) < (delta << ahead_bits))) { //Only continue prefetching if delta is still same state := Mux(history_cnt >= params.history.U, s_active, s_idle) } .otherwise { state := s_wait } } } File AbstractPrefetcher.scala: package barf import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.diplomacy._ import freechips.rocketchip.util._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.subsystem.{CacheBlockBytes} trait CanInstantiatePrefetcher { def desc: String def instantiate()(implicit p: Parameters): AbstractPrefetcher } class Snoop(implicit val p: Parameters) extends Bundle { val blockBytes = p(CacheBlockBytes) val write = Bool() val address = UInt() def block = address >> log2Up(blockBytes) def block_address = block << log2Up(blockBytes) } class Prefetch(implicit val p: Parameters) extends Bundle { val blockBytes = p(CacheBlockBytes) val write = Bool() val address = UInt() def block = address >> log2Up(blockBytes) def block_address = block << log2Up(blockBytes) } class PrefetcherIO(implicit p: Parameters) extends Bundle { val snoop = Input(Valid(new Snoop)) val request = Decoupled(new Prefetch) val hit = Output(Bool()) } abstract class AbstractPrefetcher(implicit p: Parameters) extends Module { val io = IO(new PrefetcherIO) io.request.valid := false.B io.request.bits := DontCare io.request.bits.address := 0.U(1.W) io.hit := false.B } case class NullPrefetcherParams() extends CanInstantiatePrefetcher { def desc() = "Null Prefetcher" def instantiate()(implicit p: Parameters) = Module(new NullPrefetcher()(p)) } class NullPrefetcher(implicit p: Parameters) extends AbstractPrefetcher()(p)
module StridedPrefetcher( // @[Strided.scala:21:7]
  input clock, // @[Strided.scala:21:7]
  input reset, // @[Strided.scala:21:7]
  input io_snoop_valid, // @[AbstractPrefetcher.scala:41:14]
  input io_snoop_bits_write, // @[AbstractPrefetcher.scala:41:14]
  input [39:0] io_snoop_bits_address, // @[AbstractPrefetcher.scala:41:14]
  input io_request_ready, // @[AbstractPrefetcher.scala:41:14]
  output io_request_valid, // @[AbstractPrefetcher.scala:41:14]
  output io_request_bits_write, // @[AbstractPrefetcher.scala:41:14]
  output [39:0] io_request_bits_address // @[AbstractPrefetcher.scala:41:14]
);

  wire io_snoop_valid_0 = io_snoop_valid; // @[Strided.scala:21:7]
  wire io_snoop_bits_write_0 = io_snoop_bits_write; // @[Strided.scala:21:7]
  wire [39:0] io_snoop_bits_address_0 = io_snoop_bits_address; // @[Strided.scala:21:7]
  wire io_request_ready_0 = io_request_ready; // @[Strided.scala:21:7]
  wire [4:0] _history_reset_T = 5'h7; // @[Strided.scala:53:59]
  wire [4:0] _reset_counter_T = 5'h7; // @[Strided.scala:102:67]
  wire [7:0] _pref_far_enough_T_2 = 8'h40; // @[Strided.scala:86:52]
  wire [3:0] _history_reset_T_1 = 4'h7; // @[Strided.scala:53:59, :102:67]
  wire [3:0] _reset_counter_T_1 = 4'h7; // @[Strided.scala:53:59, :102:67]
  wire io_hit = 1'h0; // @[Strided.scala:21:7]
  wire _io_request_valid_T_1; // @[Strided.scala:88:42]
  wire io_request_bits_write_0; // @[Strided.scala:21:7]
  wire [39:0] io_request_bits_address_0; // @[Strided.scala:21:7]
  wire io_request_valid_0; // @[Strided.scala:21:7]
  reg [1:0] state; // @[Strided.scala:28:22]
  reg [39:0] prefetch; // @[Strided.scala:30:21]
  assign io_request_bits_address_0 = prefetch; // @[Strided.scala:21:7, :30:21]
  reg [39:0] delta; // @[Strided.scala:31:22]
  reg [2:0] history_cnt; // @[Strided.scala:32:28]
  reg [39:0] last_snoop; // @[Strided.scala:33:23]
  reg [39:0] last_snoop_2; // @[Strided.scala:34:25]
  reg last_write; // @[Strided.scala:35:23]
  assign io_request_bits_write_0 = last_write; // @[Strided.scala:21:7, :35:23]
  reg valid_delay; // @[Strided.scala:36:24]
  reg pref_far_enough; // @[Strided.scala:37:32]
  reg [39:0] last_delta; // @[Strided.scala:39:23]
  reg delta_pos; // @[Strided.scala:40:22]
  reg last_delta_pos; // @[Strided.scala:41:27]
  reg [2:0] reset_counter; // @[Strided.scala:43:30]
  reg history_reset; // @[Strided.scala:44:26]
  wire [39:0] _last_snoop_T = io_snoop_valid_0 ? io_snoop_bits_address_0 : last_snoop; // @[Strided.scala:21:7, :33:23, :46:20]
  wire [39:0] _last_snoop_2_T = io_snoop_valid_0 ? last_snoop : last_snoop_2; // @[Strided.scala:21:7, :33:23, :34:25, :47:22]
  wire _last_write_T = io_snoop_valid_0 ? io_snoop_bits_write_0 : last_write; // @[Strided.scala:21:7, :35:23, :48:20]
  wire [40:0] _GEN = {1'h0, last_snoop}; // @[Strided.scala:33:23, :49:59]
  wire [40:0] _last_delta_T = {1'h0, io_snoop_bits_address_0} - _GEN; // @[Strided.scala:21:7, :49:59]
  wire [39:0] _last_delta_T_1 = _last_delta_T[39:0]; // @[Strided.scala:49:59]
  wire [39:0] _last_delta_T_2 = io_snoop_valid_0 ? _last_delta_T_1 : last_delta; // @[Strided.scala:21:7, :39:23, :49:{20,59}]
  wire _last_delta_pos_T = io_snoop_bits_address_0 > last_snoop; // @[Strided.scala:21:7, :33:23, :50:63]
  wire _last_delta_pos_T_1 = io_snoop_valid_0 ? _last_delta_pos_T : last_delta_pos; // @[Strided.scala:21:7, :41:27, :50:{24,63}]
  wire _history_reset_T_2 = &reset_counter; // @[Strided.scala:43:30, :53:34]
  wire _history_cnt_T = &history_cnt; // @[Strided.scala:32:28, :60:38]
  wire [3:0] _GEN_0 = {1'h0, history_cnt}; // @[Strided.scala:32:28, :60:97]
  wire [3:0] _history_cnt_T_1 = _GEN_0 + 4'h1; // @[Strided.scala:60:97]
  wire [2:0] _history_cnt_T_2 = _history_cnt_T_1[2:0]; // @[Strided.scala:60:97]
  wire [2:0] _history_cnt_T_3 = _history_cnt_T ? history_cnt : _history_cnt_T_2; // @[Strided.scala:32:28, :60:{25,38,97}]
  wire _history_cnt_T_4 = history_cnt == 3'h0; // @[Strided.scala:32:28, :62:38]
  wire [3:0] _history_cnt_T_5 = _GEN_0 - 4'h1; // @[Strided.scala:60:97, :62:72]
  wire [2:0] _history_cnt_T_6 = _history_cnt_T_5[2:0]; // @[Strided.scala:62:72]
  wire [2:0] _history_cnt_T_7 = _history_cnt_T_4 ? history_cnt : _history_cnt_T_6; // @[Strided.scala:32:28, :62:{25,38,72}]
  wire [40:0] _prefetch_T = _GEN + {1'h0, last_delta}; // @[Strided.scala:39:23, :49:59, :72:30]
  wire [39:0] _prefetch_T_1 = _prefetch_T[39:0]; // @[Strided.scala:72:30]
  wire _state_T = history_cnt[2]; // @[Strided.scala:32:28, :82:21, :110:32]
  wire [40:0] _GEN_1 = {1'h0, prefetch}; // @[Strided.scala:30:21, :86:31]
  wire [40:0] _pref_far_enough_T = _GEN_1 - _GEN; // @[Strided.scala:49:59, :86:31]
  wire [39:0] _pref_far_enough_T_1 = _pref_far_enough_T[39:0]; // @[Strided.scala:86:31]
  wire _pref_far_enough_T_3 = |(_pref_far_enough_T_1[39:6]); // @[Strided.scala:86:{31,44}]
  wire _io_request_valid_T = state == 2'h2; // @[Strided.scala:28:22, :88:29]
  assign _io_request_valid_T_1 = _io_request_valid_T & pref_far_enough; // @[Strided.scala:37:32, :88:{29,42}]
  assign io_request_valid_0 = _io_request_valid_T_1; // @[Strided.scala:21:7, :88:42]
  wire [40:0] _GEN_2 = _GEN_1 + {1'h0, delta}; // @[Strided.scala:31:22, :86:31, :93:26]
  wire [40:0] _prefetch_T_2; // @[Strided.scala:93:26]
  assign _prefetch_T_2 = _GEN_2; // @[Strided.scala:93:26]
  wire [40:0] _prefetch_T_4; // @[Strided.scala:107:26]
  assign _prefetch_T_4 = _GEN_2; // @[Strided.scala:93:26, :107:26]
  wire [39:0] _prefetch_T_3 = _prefetch_T_2[39:0]; // @[Strided.scala:93:26]
  wire _reset_counter_T_2 = &reset_counter; // @[Strided.scala:43:30, :53:34, :102:42]
  wire [3:0] _reset_counter_T_3 = {1'h0, reset_counter} + 4'h1; // @[Strided.scala:43:30, :53:34, :102:93]
  wire [2:0] _reset_counter_T_4 = _reset_counter_T_3[2:0]; // @[Strided.scala:102:93]
  wire [2:0] _reset_counter_T_5 = _reset_counter_T_2 ? 3'h0 : _reset_counter_T_4; // @[Strided.scala:102:{27,42,93}]
  wire [39:0] _prefetch_T_5 = _prefetch_T_4[39:0]; // @[Strided.scala:107:26]
  wire [1:0] _state_T_1 = {_state_T, 1'h0}; // @[Strided.scala:110:{19,32}]
  wire [40:0] _T_37 = {delta, 1'h0}; // @[Strided.scala:31:22, :108:58]
  wire _T_15 = state == 2'h1; // @[Strided.scala:28:22, :96:15]
  wire [40:0] _T_24 = {delta, 1'h0}; // @[Strided.scala:31:22, :97:58]
  wire _T_27 = delta_pos & {1'h0, _pref_far_enough_T[39:0]} < _T_24 | ~delta_pos & {1'h0, last_snoop - prefetch} < _T_24; // @[Strided.scala:30:21, :33:23, :40:22, :86:31, :97:{22,35,49,58,74,78,89,104,116}]
  wire _T_7 = state == 2'h0; // @[Strided.scala:28:22, :66:14]
  wire _GEN_3 = _T_7 & history_cnt[2]; // @[Strided.scala:28:22, :32:28, :66:{14,26}, :69:{23,44}, :70:13]
  wire _T_28 = io_request_ready_0 & io_request_valid_0; // @[Decoupled.scala:51:35]
  always @(posedge clock) begin // @[Strided.scala:21:7]
    if (reset) begin // @[Strided.scala:21:7]
      state <= 2'h0; // @[Strided.scala:28:22]
      delta <= 40'h0; // @[Strided.scala:31:22]
      history_cnt <= 3'h0; // @[Strided.scala:32:28]
      pref_far_enough <= 1'h0; // @[Strided.scala:37:32]
      reset_counter <= 3'h0; // @[Strided.scala:43:30]
    end
    else begin // @[Strided.scala:21:7]
      if (_T_28) // @[Decoupled.scala:51:35]
        state <= delta_pos & {1'h0, _pref_far_enough_T[39:0]} < _T_37 | ~delta_pos & {1'h0, last_snoop - prefetch} < _T_37 ? _state_T_1 : 2'h1; // @[Strided.scala:28:22, :30:21, :33:23, :40:22, :86:31, :108:{22,35,49,58,74,78,89,104,116,142}, :110:{13,19}, :112:13]
      else if (_T_15 & _T_27) // @[Strided.scala:82:41, :96:{15,27}, :97:{74,142}, :98:13]
        state <= 2'h2; // @[Strided.scala:28:22]
      else if (_state_T) begin // @[Strided.scala:82:21, :110:32]
        if (_GEN_3) // @[Strided.scala:28:22, :66:26, :69:44, :70:13]
          state <= 2'h2; // @[Strided.scala:28:22]
      end
      else // @[Strided.scala:82:21]
        state <= 2'h0; // @[Strided.scala:28:22]
      if (~_T_7 | history_cnt[2]) begin // @[Strided.scala:31:22, :32:28, :66:{14,26}, :69:{23,44}]
      end
      else // @[Strided.scala:31:22, :66:26, :69:44]
        delta <= last_delta; // @[Strided.scala:31:22, :39:23]
      if (io_snoop_valid_0) // @[Strided.scala:21:7]
        history_cnt <= history_reset ? 3'h0 : delta == _last_delta_T[39:0] | delta == io_snoop_bits_address_0 - last_snoop_2 ? _history_cnt_T_3 : _history_cnt_T_7; // @[Strided.scala:21:7, :31:22, :32:28, :34:25, :44:26, :49:59, :57:25, :58:19, :59:{24,51,65,74,101,118}, :60:{19,25}, :62:{19,25}]
      pref_far_enough <= _pref_far_enough_T_3; // @[Strided.scala:37:32, :86:44]
      if (_T_15) begin // @[Strided.scala:96:15]
        if (_T_27) // @[Strided.scala:97:74]
          reset_counter <= 3'h0; // @[Strided.scala:43:30]
        else if (valid_delay) // @[Strided.scala:36:24]
          reset_counter <= _reset_counter_T_5; // @[Strided.scala:43:30, :102:27]
        else if (_T_7) // @[Strided.scala:66:14]
          reset_counter <= 3'h0; // @[Strided.scala:43:30]
      end
      else if (_T_7) // @[Strided.scala:66:14]
        reset_counter <= 3'h0; // @[Strided.scala:43:30]
    end
    if (_T_28) // @[Decoupled.scala:51:35]
      prefetch <= _prefetch_T_5; // @[Strided.scala:30:21, :107:26]
    else if (pref_far_enough) begin // @[Strided.scala:37:32]
      if (_GEN_3) // @[Strided.scala:28:22, :66:26, :69:44, :70:13]
        prefetch <= _prefetch_T_1; // @[Strided.scala:30:21, :72:30]
    end
    else // @[Strided.scala:37:32]
      prefetch <= _prefetch_T_3; // @[Strided.scala:30:21, :93:26]
    last_snoop <= _last_snoop_T; // @[Strided.scala:33:23, :46:20]
    last_snoop_2 <= _last_snoop_2_T; // @[Strided.scala:34:25, :47:22]
    last_write <= _last_write_T; // @[Strided.scala:35:23, :48:20]
    valid_delay <= io_snoop_valid_0; // @[Strided.scala:21:7, :36:24]
    last_delta <= _last_delta_T_2; // @[Strided.scala:39:23, :49:20]
    if (_GEN_3) // @[Strided.scala:28:22, :66:26, :69:44, :70:13]
      delta_pos <= last_delta_pos; // @[Strided.scala:40:22, :41:27]
    last_delta_pos <= _last_delta_pos_T_1; // @[Strided.scala:41:27, :50:24]
    history_reset <= _history_reset_T_2; // @[Strided.scala:44:26, :53:34]
  end // always @(posedge)
  assign io_request_valid = io_request_valid_0; // @[Strided.scala:21:7]
  assign io_request_bits_write = io_request_bits_write_0; // @[Strided.scala:21:7]
  assign io_request_bits_address = io_request_bits_address_0; // @[Strided.scala:21:7]
endmodule
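The Chisel source that produced this module (Strided.scala) is not listed above, so the following is only a minimal sketch of the structure the emitted Verilog implies: the prefetcher snoops addresses, computes the delta between consecutive snoops, builds confidence in a saturating history counter, and then issues requests one stride ahead of the demand stream. Register names mirror the Verilog; the io bundle shape, the 64-byte distance check, and the exact state transitions are assumptions, not the real implementation.

import chisel3._
import chisel3.util._

// Sketch only: approximates the generated StridedPrefetcher, not the actual Strided.scala.
class StridedPrefetcherSketch extends Module {
  val io = IO(new Bundle {
    val snoop   = Flipped(Valid(new Bundle { val write = Bool(); val address = UInt(40.W) }))
    val request = Decoupled(new Bundle { val write = Bool(); val address = UInt(40.W) })
  })

  val s_idle :: s_train :: s_prefetch :: Nil = Enum(3)
  val state      = RegInit(s_idle)
  val prefetch   = Reg(UInt(40.W))    // next address to request
  val delta      = RegInit(0.U(40.W)) // trained stride
  val historyCnt = RegInit(0.U(3.W))  // saturating confidence counter
  val lastSnoop  = Reg(UInt(40.W))
  val lastDelta  = Reg(UInt(40.W))
  val lastWrite  = RegInit(false.B)

  val curDelta = io.snoop.bits.address - lastSnoop
  when (io.snoop.valid) {
    lastSnoop := io.snoop.bits.address
    lastDelta := curDelta
    lastWrite := io.snoop.bits.write
    // Count up when the observed delta matches the trained one, down otherwise.
    when (curDelta === delta) {
      historyCnt := Mux(historyCnt.andR, historyCnt, historyCnt + 1.U)
    } .otherwise {
      historyCnt := Mux(historyCnt === 0.U, historyCnt, historyCnt - 1.U)
    }
  }

  // Adopt the most recent delta and start prefetching once confidence is high enough.
  when (state === s_idle && historyCnt(2)) {
    delta    := lastDelta
    prefetch := lastSnoop + lastDelta
    state    := s_prefetch
  }

  // Only request once the candidate address is at least one 64-byte block ahead of the demand stream.
  val farEnough = RegNext((prefetch - lastSnoop) >= 64.U, false.B)
  io.request.valid        := state === s_prefetch && farEnough
  io.request.bits.address := prefetch
  io.request.bits.write   := lastWrite

  // Advance the prefetch pointer by one stride per accepted request.
  when (io.request.fire) { prefetch := prefetch + delta }
}

The real module additionally tracks whether the stride is positive or negative (delta_pos) and periodically restarts training via reset_counter/history_reset; the sketch omits those details.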
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Nodes.scala: // See LICENSE.SiFive for license details. 
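As a brief aside on the freechips.rocketchip.util package object above: most of its helpers are implicit extension classes, so a single import makes them look like methods on UInt, Data, or Seq. A minimal, hypothetical usage sketch follows (the UtilDemo module and its ports are invented for illustration; only isOneOf, holdUnless, and leftOR, with the signatures shown in package.scala above, are real):

import chisel3._
import chisel3.util._
import freechips.rocketchip.util._

class UtilDemo extends Module {
  val io = IO(new Bundle {
    val opcode = Input(UInt(3.W))
    val en     = Input(Bool())
    val data   = Input(UInt(8.W))
    val isPut  = Output(Bool())
    val held   = Output(UInt(8.W))
    val mask   = Output(UInt(8.W))
  })
  // UIntIsOneOf: compare against a small set of constants without chaining ===/||.
  io.isPut := io.opcode.isOneOf(0.U, 1.U)
  // DataToAugmentedData.holdUnless: pass data through while en is high, otherwise hold the last enabled value.
  io.held := io.data.holdUnless(io.en)
  // leftOR: fill 1s from low bits to high bits.
  io.mask := leftOR(io.data)
}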
package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, 
TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case 
class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] 
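The node classes declared in Nodes.scala above are only declarations; they are bound together inside LazyModules during diplomatic elaboration. Below is a minimal, hypothetical sketch of an adapter that forwards TileLink traffic unchanged, using TLAdapterNode with its default identity client/manager functions (the TLPassthrough name is invented; LazyModule and LazyModuleImp come from the diplomacy library shown later in LazyModuleImp.scala):

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.tilelink._

class TLPassthrough(implicit p: Parameters) extends LazyModule {
  // Identity client/manager functions: parameters flow through unchanged.
  val node = TLAdapterNode()
  lazy val module = new LazyModuleImp(this) {
    (node.in zip node.out).foreach { case ((in, _), (out, _)) =>
      out <> in // forward all channels of each connected edge
    }
  }
}

// Elsewhere, the adapter would be bound into the graph with the := operator, e.g.
//   manager.node := passthrough.node := client.node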
// discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. 
* Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode 
= UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new 
TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File BusBypass.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.devices.tilelink import chisel3._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ abstract class TLBusBypassBase(beatBytes: Int, deadlock: Boolean = false, bufferError: Boolean = true, maxAtomic: Int = 16, maxTransfer: Int = 4096) (implicit p: Parameters) extends LazyModule { protected val nodeIn = TLIdentityNode() protected val nodeOut = TLIdentityNode() val node = NodeHandle(nodeIn, nodeOut) protected val bar = LazyModule(new TLBusBypassBar(dFn = { mp => mp.v1copy(managers = mp.managers.map { m => m.v1copy( mayDenyPut = m.mayDenyPut || !deadlock, mayDenyGet = m.mayDenyGet || !deadlock) }) })) protected val everything = Seq(AddressSet(0, BigInt("ffffffffffffffffffffffffffffffff", 16))) // 128-bit protected val params = DevNullParams(everything, maxAtomic, maxTransfer, region=RegionType.TRACKED) protected val error = if (deadlock) LazyModule(new TLDeadlock(params, beatBytes)) else LazyModule(new TLError(params, bufferError, beatBytes)) // order matters because the parameters and bypass // assume that the non-bypassed connection is // the last connection to the bar, so keep nodeOut last. 
bar.node := nodeIn error.node := bar.node nodeOut := bar.node } class TLBusBypass(beatBytes: Int, bufferError: Boolean = false, maxAtomic: Int = 16, maxTransfer: Int = 4096)(implicit p: Parameters) extends TLBusBypassBase(beatBytes, deadlock = false, bufferError = bufferError, maxAtomic = maxAtomic, maxTransfer = maxTransfer) { lazy val module = new Impl class Impl extends LazyModuleImp(this) { val io = IO(new Bundle { val bypass = Input(Bool()) }) bar.module.io.bypass := io.bypass } } class TLBypassNode(dFn: TLSlavePortParameters => TLSlavePortParameters)(implicit valName: ValName) extends TLCustomNode { def resolveStar(iKnown: Int, oKnown: Int, iStars: Int, oStars: Int): (Int, Int) = { require (iStars == 0 && oStars == 0, "TLBypass node does not support :=* or :*=") require (iKnown == 1, "TLBypass node expects exactly one input") require (oKnown == 2, "TLBypass node expects exactly two outputs") (0, 0) } def mapParamsD(n: Int, p: Seq[TLMasterPortParameters]): Seq[TLMasterPortParameters] = { p ++ p } def mapParamsU(n: Int, p: Seq[TLSlavePortParameters]): Seq[TLSlavePortParameters] = { Seq(dFn(p.last).v1copy(minLatency = p.map(_.minLatency).min))} } class TLBusBypassBar(dFn: TLSlavePortParameters => TLSlavePortParameters)(implicit p: Parameters) extends LazyModule { val node = new TLBypassNode(dFn) lazy val module = new Impl class Impl extends LazyModuleImp(this) { val io = IO(new Bundle { val bypass = Input(Bool()) val pending = Output(Bool()) }) val (in, edgeIn) = node.in(0) val Seq((out0, edgeOut0), (out1, edgeOut1)) = node.out require (edgeOut0.manager.beatBytes == edgeOut1.manager.beatBytes, s"BusBypass slave device widths mismatch (${edgeOut0.manager.managers.map(_.name)} has ${edgeOut0.manager.beatBytes}B vs ${edgeOut1.manager.managers.map(_.name)} has ${edgeOut1.manager.beatBytes}B)") // We need to be locked to the given bypass direction until all transactions stop val in_reset = RegNext(false.B, init = true.B) val bypass_reg = Reg(Bool()) val bypass = Mux(in_reset, io.bypass, bypass_reg) val (flight, next_flight) = edgeIn.inFlight(in) io.pending := (flight > 0.U) when (in_reset || (next_flight === 0.U)) { bypass_reg := io.bypass } val stall = (bypass =/= io.bypass) && edgeIn.first(in.a) out0.a.valid := !stall && in.a.valid && bypass out1.a.valid := !stall && in.a.valid && !bypass in.a.ready := !stall && Mux(bypass, out0.a.ready, out1.a.ready) out0.a.bits := in.a.bits out1.a.bits := in.a.bits out0.d.ready := in.d.ready && bypass out1.d.ready := in.d.ready && !bypass in.d.valid := Mux(bypass, out0.d.valid, out1.d.valid) def cast(x: TLBundleD) = { val out = WireDefault(in.d.bits); out <> x; out } in.d.bits := Mux(bypass, cast(out0.d.bits), cast(out1.d.bits)) if (edgeIn.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) { out0.b.ready := in.b.ready && bypass out1.b.ready := in.b.ready && !bypass in.b.valid := Mux(bypass, out0.b.valid, out1.b.valid) def cast(x: TLBundleB) = { val out = Wire(in.b.bits); out <> x; out } in.b.bits := Mux(bypass, cast(out0.b.bits), cast(out1.b.bits)) out0.c.valid := in.c.valid && bypass out1.c.valid := in.c.valid && !bypass in.c.ready := Mux(bypass, out0.c.ready, out1.c.ready) out0.c.bits := in.c.bits out1.c.bits := in.c.bits out0.e.valid := in.e.valid && bypass out1.e.valid := in.e.valid && !bypass in.e.ready := Mux(bypass, out0.e.ready, out1.e.ready) out0.e.bits := in.e.bits out1.e.bits := in.e.bits } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out0.b.ready := true.B out0.c.valid := false.B out0.e.valid := false.B 
out1.b.ready := true.B out1.c.valid := false.B out1.e.valid := false.B } } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. 
Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] 
[[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. 
*/ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. 
We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... // Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. 
* * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. 
|$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. */ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. 
*/ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. */ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? 
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
      }
      case _:TLBundleB => {
        // Do there exist B messages with Data?
        val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
        // Do there exist B messages without Data?
        val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
      }
      case _:TLBundleC => {
        // Do there exist C messages with Data?
        val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
        // Do there exist C messages without Data?
        val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
        if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
      }
      case _:TLBundleD => {
        // Do there exist D messages with Data?
        val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
        // Do there exist D messages without Data?
        val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
        if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
      }
      case _:TLBundleE => Some(false)
    }
  }

  def isRequest(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => true.B
      case b: TLBundleB => true.B
      case c: TLBundleC => c.opcode(2) && c.opcode(1)
        // opcode === TLMessages.Release ||
        // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(2) && !d.opcode(1)
        // opcode === TLMessages.Grant ||
        // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
  }

  def isResponse(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => false.B
      case b: TLBundleB => false.B
      case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
        // opcode =/= TLMessages.Release &&
        // opcode =/= TLMessages.ReleaseData
      case d: TLBundleD => true.B // Grant isResponse + isRequest
      case e: TLBundleE => true.B
    }
  }

  def hasData(x: TLChannel): Bool = {
    val opdata = x match {
      case a: TLBundleA => !a.opcode(2)
        // opcode === TLMessages.PutFullData ||
        // opcode === TLMessages.PutPartialData ||
        // opcode === TLMessages.ArithmeticData ||
        // opcode === TLMessages.LogicalData
      case b: TLBundleB => !b.opcode(2)
        // opcode === TLMessages.PutFullData ||
        // opcode === TLMessages.PutPartialData ||
        // opcode === TLMessages.ArithmeticData ||
        // opcode === TLMessages.LogicalData
      case c: TLBundleC => c.opcode(0)
        // opcode === TLMessages.AccessAckData ||
        // opcode === TLMessages.ProbeAckData ||
        // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(0)
        // opcode === TLMessages.AccessAckData ||
        // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
    staticHasData(x).map(_.B).getOrElse(opdata)
  }

  def opcode(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.opcode
      case b: TLBundleB => b.opcode
      case c: TLBundleC => c.opcode
      case d: TLBundleD => d.opcode
    }
  }

  def param(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.param
      case b: TLBundleB => b.param
      case c: TLBundleC => c.param
      case d: TLBundleD => d.param
    }
  }

  def size(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.size
      case b: TLBundleB => b.size
      case c: TLBundleC => c.size
      case d: TLBundleD => d.size
    }
  }

  def
data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = 
count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def 
AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := 
fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data 
:= data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def 
Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
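The TLEdgeOut/TLEdgeIn helpers above are constructors: each returns a fully populated channel bundle (plus a static legality Bool for request types), and the beat helpers (firstlastHelper, firstlast, done) track multi-beat messages on a channel. The following is a minimal sketch, not part of the files above, showing how these helpers are typically driven from a client; it assumes the usual rocket-chip TLClientNode/LazyModuleImp pattern, and the class name GetOnceClient, the source id range, the target address 0x100, and lgSize 2 are hypothetical values chosen for illustration.

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._

class GetOnceClient(implicit p: Parameters) extends LazyModule {
  // One client with a single source id; diplomacy negotiates the edge parameters.
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(name = "getOnce", sourceId = IdRange(0, 1))))))

  lazy val module = new LazyModuleImp(this) {
    val (tl, edge) = node.out.head

    // edge.Get returns (legal, bits): `legal` is a statically evaluated check against
    // the managers visible on this edge, `bits` is a complete TLBundleA for the request.
    val (legal, getBits) = edge.Get(fromSource = 0.U, toAddress = 0x100.U, lgSize = 2.U)

    val sent = RegInit(false.B)
    tl.a.valid := !sent && legal
    tl.a.bits  := getBits
    when (tl.a.fire) { sent := true.B }

    // edge.firstlast wraps firstlastHelper: it counts beats from numBeats1 and reports
    // first/last/done for the (possibly multi-beat) D response; only `done` is used here.
    val (_, _, d_done) = edge.firstlast(tl.d)
    val received = RegInit(false.B)
    tl.d.ready := true.B
    when (d_done) { received := true.B }
  }
}

Note that tl.a.bits is driven wholesale from the bundle built by edge.Get; the same pattern applies to Put/Arithmetic/Logical/Hint on the A channel and to the TLEdgeIn constructors (AccessAck, Grant, and so on) when responding on the B/D channels.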
module TLBusBypassBar( // @[BusBypass.scala:66:9] input clock, // @[BusBypass.scala:66:9] input reset, // @[BusBypass.scala:66:9] output auto_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [8:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [31:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_out_1_a_ready, // @[LazyModuleImp.scala:107:25] output auto_out_1_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [8:0] auto_out_1_a_bits_address, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_1_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_out_1_d_ready, // @[LazyModuleImp.scala:107:25] input auto_out_1_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_1_d_bits_param, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_1_d_bits_size, // @[LazyModuleImp.scala:107:25] input auto_out_1_d_bits_source, // @[LazyModuleImp.scala:107:25] input auto_out_1_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_out_1_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [31:0] auto_out_1_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_out_1_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_out_0_a_ready, // @[LazyModuleImp.scala:107:25] output auto_out_0_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_0_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [127:0] auto_out_0_a_bits_address, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_0_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_out_0_d_ready, // @[LazyModuleImp.scala:107:25] input auto_out_0_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_0_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_0_d_bits_param, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_0_d_bits_size, // @[LazyModuleImp.scala:107:25] input auto_out_0_d_bits_denied, // @[LazyModuleImp.scala:107:25] input auto_out_0_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input io_bypass // @[BusBypass.scala:67:16] ); wire auto_in_a_valid_0 = auto_in_a_valid; // @[BusBypass.scala:66:9] wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[BusBypass.scala:66:9] wire [8:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[BusBypass.scala:66:9] wire [31:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[BusBypass.scala:66:9] wire auto_in_d_ready_0 = auto_in_d_ready; // @[BusBypass.scala:66:9] wire auto_out_1_a_ready_0 = auto_out_1_a_ready; // @[BusBypass.scala:66:9] wire auto_out_1_d_valid_0 = auto_out_1_d_valid; // @[BusBypass.scala:66:9] wire [2:0] auto_out_1_d_bits_opcode_0 = auto_out_1_d_bits_opcode; // 
@[BusBypass.scala:66:9] wire [1:0] auto_out_1_d_bits_param_0 = auto_out_1_d_bits_param; // @[BusBypass.scala:66:9] wire [1:0] auto_out_1_d_bits_size_0 = auto_out_1_d_bits_size; // @[BusBypass.scala:66:9] wire auto_out_1_d_bits_source_0 = auto_out_1_d_bits_source; // @[BusBypass.scala:66:9] wire auto_out_1_d_bits_sink_0 = auto_out_1_d_bits_sink; // @[BusBypass.scala:66:9] wire auto_out_1_d_bits_denied_0 = auto_out_1_d_bits_denied; // @[BusBypass.scala:66:9] wire [31:0] auto_out_1_d_bits_data_0 = auto_out_1_d_bits_data; // @[BusBypass.scala:66:9] wire auto_out_1_d_bits_corrupt_0 = auto_out_1_d_bits_corrupt; // @[BusBypass.scala:66:9] wire auto_out_0_a_ready_0 = auto_out_0_a_ready; // @[BusBypass.scala:66:9] wire auto_out_0_d_valid_0 = auto_out_0_d_valid; // @[BusBypass.scala:66:9] wire [2:0] auto_out_0_d_bits_opcode_0 = auto_out_0_d_bits_opcode; // @[BusBypass.scala:66:9] wire [1:0] auto_out_0_d_bits_param_0 = auto_out_0_d_bits_param; // @[BusBypass.scala:66:9] wire [1:0] auto_out_0_d_bits_size_0 = auto_out_0_d_bits_size; // @[BusBypass.scala:66:9] wire auto_out_0_d_bits_denied_0 = auto_out_0_d_bits_denied; // @[BusBypass.scala:66:9] wire auto_out_0_d_bits_corrupt_0 = auto_out_0_d_bits_corrupt; // @[BusBypass.scala:66:9] wire io_bypass_0 = io_bypass; // @[BusBypass.scala:66:9] wire [4:0] _r_beats1_decode_T_3 = 5'h3; // @[package.scala:243:71] wire [4:0] _r_beats1_decode_T_6 = 5'h3; // @[package.scala:243:71] wire [3:0] _b_inc_WIRE_bits_mask = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _b_inc_WIRE_1_bits_mask = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _b_dec_WIRE_bits_mask = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _b_dec_WIRE_1_bits_mask = 4'h0; // @[Bundles.scala:264:61] wire [8:0] _b_inc_WIRE_bits_address = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _b_inc_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _c_inc_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_inc_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _b_dec_WIRE_bits_address = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _b_dec_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _c_dec_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_dec_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61] wire [4:0] _r_beats1_decode_T = 5'hC; // @[package.scala:243:71] wire [4:0] _stall_beats1_decode_T = 5'hC; // @[package.scala:243:71] wire [1:0] _r_beats1_decode_T_1 = 2'h0; // @[package.scala:243:76] wire [1:0] _r_beats1_decode_T_5 = 2'h0; // @[package.scala:243:46] wire [1:0] _r_beats1_decode_T_8 = 2'h0; // @[package.scala:243:46] wire [1:0] _b_inc_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _b_inc_WIRE_bits_size = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _b_inc_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _b_inc_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _c_inc_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_inc_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _b_dec_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _b_dec_WIRE_bits_size = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _b_dec_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _b_dec_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _c_dec_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_dec_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _stall_beats1_decode_T_1 = 2'h0; // @[package.scala:243:76] wire [31:0] auto_out_0_d_bits_data 
= 32'h0; // @[BusBypass.scala:66:9] wire [31:0] nodeOut_d_bits_data = 32'h0; // @[MixedNode.scala:542:17] wire [31:0] _b_inc_WIRE_bits_data = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _b_inc_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _c_inc_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_inc_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _b_dec_WIRE_bits_data = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _b_dec_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _c_dec_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_dec_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] nodeIn_d_bits_out_data = 32'h0; // @[BusBypass.scala:97:53] wire [3:0] auto_in_a_bits_mask = 4'hF; // @[Nodes.scala:27:25] wire [3:0] auto_out_1_a_bits_mask = 4'hF; // @[Nodes.scala:27:25] wire [3:0] auto_out_0_a_bits_mask = 4'hF; // @[Nodes.scala:27:25] wire [3:0] nodeIn_a_bits_mask = 4'hF; // @[Nodes.scala:27:25] wire [3:0] nodeOut_a_bits_mask = 4'hF; // @[Nodes.scala:27:25] wire [3:0] x1_nodeOut_a_bits_mask = 4'hF; // @[Nodes.scala:27:25] wire [1:0] auto_in_a_bits_size = 2'h2; // @[Nodes.scala:27:25] wire [1:0] auto_out_1_a_bits_size = 2'h2; // @[Nodes.scala:27:25] wire [1:0] auto_out_0_a_bits_size = 2'h2; // @[Nodes.scala:27:25] wire [1:0] nodeIn_a_bits_size = 2'h2; // @[Nodes.scala:27:25] wire [1:0] nodeOut_a_bits_size = 2'h2; // @[Nodes.scala:27:25] wire [1:0] x1_nodeOut_a_bits_size = 2'h2; // @[Nodes.scala:27:25] wire [2:0] auto_in_a_bits_param = 3'h0; // @[BusBypass.scala:66:9] wire [2:0] auto_out_1_a_bits_param = 3'h0; // @[BusBypass.scala:66:9] wire [2:0] auto_out_0_a_bits_param = 3'h0; // @[BusBypass.scala:66:9] wire [2:0] nodeIn_a_bits_param = 3'h0; // @[MixedNode.scala:551:17] wire [2:0] nodeOut_a_bits_param = 3'h0; // @[MixedNode.scala:542:17] wire [2:0] x1_nodeOut_a_bits_param = 3'h0; // @[MixedNode.scala:542:17] wire [2:0] _b_inc_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _b_inc_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _c_inc_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_inc_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_inc_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_inc_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _b_dec_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _b_dec_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _c_dec_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_dec_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_dec_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_dec_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [1:0] _r_beats1_decode_T_2 = 2'h3; // @[package.scala:243:46] wire [1:0] _r_beats1_decode_T_4 = 2'h3; // @[package.scala:243:76] wire [1:0] _r_counter1_T_1 = 2'h3; // @[Edges.scala:230:28] wire [1:0] _r_beats1_decode_T_7 = 2'h3; // @[package.scala:243:76] wire [1:0] _r_counter1_T_2 = 2'h3; // @[Edges.scala:230:28] wire [1:0] _r_counter1_T_4 = 2'h3; // @[Edges.scala:230:28] wire [1:0] _stall_beats1_decode_T_2 = 2'h3; // @[package.scala:243:46] wire _r_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire a_last = 1'h1; // @[Edges.scala:232:33] wire r_beats1_opdata_1 = 1'h1; // @[Edges.scala:97:28] wire r_counter1_1 = 1'h1; // @[Edges.scala:230:28] wire b_first = 1'h1; // @[Edges.scala:231:25] wire _r_last_T_3 = 1'h1; // @[Edges.scala:232:43] wire b_last = 1'h1; // 
@[Edges.scala:232:33] wire r_counter1_2 = 1'h1; // @[Edges.scala:230:28] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _r_last_T_5 = 1'h1; // @[Edges.scala:232:43] wire c_last = 1'h1; // @[Edges.scala:232:33] wire _r_last_T_7 = 1'h1; // @[Edges.scala:232:43] wire d_last = 1'h1; // @[Edges.scala:232:33] wire r_counter1_4 = 1'h1; // @[Edges.scala:230:28] wire e_first = 1'h1; // @[Edges.scala:231:25] wire _r_last_T_9 = 1'h1; // @[Edges.scala:232:43] wire e_last = 1'h1; // @[Edges.scala:232:33] wire c_response = 1'h1; // @[Edges.scala:82:41] wire _stall_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire stall_last = 1'h1; // @[Edges.scala:232:33] wire auto_in_a_bits_source = 1'h0; // @[BusBypass.scala:66:9] wire auto_in_a_bits_corrupt = 1'h0; // @[BusBypass.scala:66:9] wire auto_out_1_a_bits_source = 1'h0; // @[BusBypass.scala:66:9] wire auto_out_1_a_bits_corrupt = 1'h0; // @[BusBypass.scala:66:9] wire auto_out_0_a_bits_source = 1'h0; // @[BusBypass.scala:66:9] wire auto_out_0_a_bits_corrupt = 1'h0; // @[BusBypass.scala:66:9] wire auto_out_0_d_bits_source = 1'h0; // @[BusBypass.scala:66:9] wire auto_out_0_d_bits_sink = 1'h0; // @[BusBypass.scala:66:9] wire nodeIn_a_ready; // @[MixedNode.scala:551:17] wire nodeIn_a_bits_source = 1'h0; // @[MixedNode.scala:551:17] wire nodeIn_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire nodeOut_a_bits_source = 1'h0; // @[MixedNode.scala:542:17] wire nodeOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire nodeOut_d_bits_source = 1'h0; // @[MixedNode.scala:542:17] wire nodeOut_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17] wire x1_nodeOut_a_bits_source = 1'h0; // @[MixedNode.scala:542:17] wire x1_nodeOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire r_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire r_beats1 = 1'h0; // @[Edges.scala:221:14] wire r_4 = 1'h0; // @[Edges.scala:234:25] wire r_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59] wire _r_beats1_opdata_T_1 = 1'h0; // @[Edges.scala:97:37] wire r_beats1_1 = 1'h0; // @[Edges.scala:221:14] wire _r_last_T_2 = 1'h0; // @[Edges.scala:232:25] wire r_3_1 = 1'h0; // @[Edges.scala:233:22] wire _r_count_T_1 = 1'h0; // @[Edges.scala:234:27] wire r_4_1 = 1'h0; // @[Edges.scala:234:25] wire _r_counter_T_1 = 1'h0; // @[Edges.scala:236:21] wire r_beats1_decode_2 = 1'h0; // @[Edges.scala:220:59] wire r_beats1_opdata_2 = 1'h0; // @[Edges.scala:102:36] wire r_beats1_2 = 1'h0; // @[Edges.scala:221:14] wire _r_last_T_4 = 1'h0; // @[Edges.scala:232:25] wire r_3_2 = 1'h0; // @[Edges.scala:233:22] wire _r_count_T_2 = 1'h0; // @[Edges.scala:234:27] wire r_4_2 = 1'h0; // @[Edges.scala:234:25] wire _r_counter_T_2 = 1'h0; // @[Edges.scala:236:21] wire r_beats1_decode_3 = 1'h0; // @[Edges.scala:220:59] wire r_beats1_3 = 1'h0; // @[Edges.scala:221:14] wire r_4_3 = 1'h0; // @[Edges.scala:234:25] wire _r_last_T_8 = 1'h0; // @[Edges.scala:232:25] wire r_3_4 = 1'h0; // @[Edges.scala:233:22] wire _r_count_T_4 = 1'h0; // @[Edges.scala:234:27] wire r_4_4 = 1'h0; // @[Edges.scala:234:25] wire _r_counter_T_4 = 1'h0; // @[Edges.scala:236:21] wire c_request = 1'h0; // @[Edges.scala:68:40] wire _b_inc_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire _b_inc_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire _b_inc_WIRE_bits_source = 1'h0; // @[Bundles.scala:264:74] wire _b_inc_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _b_inc_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61] wire _b_inc_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61] wire _b_inc_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:264:61] 
wire _b_inc_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _b_inc_T = 1'h0; // @[Decoupled.scala:51:35] wire _b_inc_T_1 = 1'h0; // @[Edges.scala:311:26] wire b_inc = 1'h0; // @[Edges.scala:311:37] wire _c_inc_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_inc_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_inc_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_inc_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_inc_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_inc_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_inc_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_inc_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_inc_T = 1'h0; // @[Decoupled.scala:51:35] wire _c_inc_T_1 = 1'h0; // @[Edges.scala:312:26] wire c_inc = 1'h0; // @[Edges.scala:312:37] wire _e_inc_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire _e_inc_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire _e_inc_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _e_inc_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61] wire _e_inc_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61] wire _e_inc_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _e_inc_T = 1'h0; // @[Decoupled.scala:51:35] wire _e_inc_T_1 = 1'h0; // @[Edges.scala:314:26] wire e_inc = 1'h0; // @[Edges.scala:314:37] wire a_dec = 1'h0; // @[Edges.scala:317:36] wire _b_dec_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire _b_dec_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire _b_dec_WIRE_bits_source = 1'h0; // @[Bundles.scala:264:74] wire _b_dec_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _b_dec_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61] wire _b_dec_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61] wire _b_dec_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:264:61] wire _b_dec_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _b_dec_T = 1'h0; // @[Decoupled.scala:51:35] wire _b_dec_T_1 = 1'h0; // @[Edges.scala:318:26] wire b_dec = 1'h0; // @[Edges.scala:318:36] wire _c_dec_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_dec_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_dec_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_dec_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_dec_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_dec_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_dec_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_dec_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_dec_T = 1'h0; // @[Decoupled.scala:51:35] wire _c_dec_T_1 = 1'h0; // @[Edges.scala:319:26] wire c_dec = 1'h0; // @[Edges.scala:319:36] wire _e_dec_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire _e_dec_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire _e_dec_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _e_dec_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61] wire _e_dec_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61] wire _e_dec_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _e_dec_T = 1'h0; // @[Decoupled.scala:51:35] wire _e_dec_T_1 = 1'h0; // @[Edges.scala:321:26] wire e_dec = 1'h0; // @[Edges.scala:321:36] wire stall_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire stall_beats1 = 1'h0; // @[Edges.scala:221:14] wire stall_count = 1'h0; // @[Edges.scala:234:25] wire nodeIn_d_bits_out_source = 1'h0; // @[BusBypass.scala:97:53] wire nodeIn_d_bits_out_sink = 1'h0; // @[BusBypass.scala:97:53] wire nodeIn_a_valid = auto_in_a_valid_0; // @[BusBypass.scala:66:9] wire [2:0] 
nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[BusBypass.scala:66:9] wire [8:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[BusBypass.scala:66:9] wire [31:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[BusBypass.scala:66:9] wire nodeIn_d_ready = auto_in_d_ready_0; // @[BusBypass.scala:66:9] wire nodeIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17] wire nodeIn_d_bits_source; // @[MixedNode.scala:551:17] wire nodeIn_d_bits_sink; // @[MixedNode.scala:551:17] wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [31:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17] wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire x1_nodeOut_a_ready = auto_out_1_a_ready_0; // @[BusBypass.scala:66:9] wire x1_nodeOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] x1_nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [8:0] x1_nodeOut_a_bits_address; // @[MixedNode.scala:542:17] wire [31:0] x1_nodeOut_a_bits_data; // @[MixedNode.scala:542:17] wire x1_nodeOut_d_ready; // @[MixedNode.scala:542:17] wire x1_nodeOut_d_valid = auto_out_1_d_valid_0; // @[BusBypass.scala:66:9] wire [2:0] x1_nodeOut_d_bits_opcode = auto_out_1_d_bits_opcode_0; // @[BusBypass.scala:66:9] wire [1:0] x1_nodeOut_d_bits_param = auto_out_1_d_bits_param_0; // @[BusBypass.scala:66:9] wire [1:0] x1_nodeOut_d_bits_size = auto_out_1_d_bits_size_0; // @[BusBypass.scala:66:9] wire x1_nodeOut_d_bits_source = auto_out_1_d_bits_source_0; // @[BusBypass.scala:66:9] wire x1_nodeOut_d_bits_sink = auto_out_1_d_bits_sink_0; // @[BusBypass.scala:66:9] wire x1_nodeOut_d_bits_denied = auto_out_1_d_bits_denied_0; // @[BusBypass.scala:66:9] wire [31:0] x1_nodeOut_d_bits_data = auto_out_1_d_bits_data_0; // @[BusBypass.scala:66:9] wire x1_nodeOut_d_bits_corrupt = auto_out_1_d_bits_corrupt_0; // @[BusBypass.scala:66:9] wire nodeOut_a_ready = auto_out_0_a_ready_0; // @[BusBypass.scala:66:9] wire nodeOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [127:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17] wire [31:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17] wire nodeOut_d_ready; // @[MixedNode.scala:542:17] wire nodeOut_d_valid = auto_out_0_d_valid_0; // @[BusBypass.scala:66:9] wire [2:0] nodeOut_d_bits_opcode = auto_out_0_d_bits_opcode_0; // @[BusBypass.scala:66:9] wire [1:0] nodeOut_d_bits_param = auto_out_0_d_bits_param_0; // @[BusBypass.scala:66:9] wire [1:0] nodeOut_d_bits_size = auto_out_0_d_bits_size_0; // @[BusBypass.scala:66:9] wire nodeOut_d_bits_denied = auto_out_0_d_bits_denied_0; // @[BusBypass.scala:66:9] wire nodeOut_d_bits_corrupt = auto_out_0_d_bits_corrupt_0; // @[BusBypass.scala:66:9] wire _io_pending_T; // @[BusBypass.scala:84:27] wire auto_in_a_ready_0; // @[BusBypass.scala:66:9] wire [2:0] auto_in_d_bits_opcode_0; // @[BusBypass.scala:66:9] wire [1:0] auto_in_d_bits_param_0; // @[BusBypass.scala:66:9] wire [1:0] auto_in_d_bits_size_0; // @[BusBypass.scala:66:9] wire auto_in_d_bits_source_0; // @[BusBypass.scala:66:9] wire auto_in_d_bits_sink_0; // @[BusBypass.scala:66:9] wire auto_in_d_bits_denied_0; // @[BusBypass.scala:66:9] wire [31:0] auto_in_d_bits_data_0; // @[BusBypass.scala:66:9] wire auto_in_d_bits_corrupt_0; // @[BusBypass.scala:66:9] wire auto_in_d_valid_0; // @[BusBypass.scala:66:9] wire [2:0] auto_out_1_a_bits_opcode_0; // 
@[BusBypass.scala:66:9] wire [8:0] auto_out_1_a_bits_address_0; // @[BusBypass.scala:66:9] wire [31:0] auto_out_1_a_bits_data_0; // @[BusBypass.scala:66:9] wire auto_out_1_a_valid_0; // @[BusBypass.scala:66:9] wire auto_out_1_d_ready_0; // @[BusBypass.scala:66:9] wire [2:0] auto_out_0_a_bits_opcode_0; // @[BusBypass.scala:66:9] wire [127:0] auto_out_0_a_bits_address_0; // @[BusBypass.scala:66:9] wire [31:0] auto_out_0_a_bits_data_0; // @[BusBypass.scala:66:9] wire auto_out_0_a_valid_0; // @[BusBypass.scala:66:9] wire auto_out_0_d_ready_0; // @[BusBypass.scala:66:9] wire io_pending; // @[BusBypass.scala:66:9] wire _nodeIn_a_ready_T_2; // @[BusBypass.scala:90:28] assign auto_in_a_ready_0 = nodeIn_a_ready; // @[BusBypass.scala:66:9] assign nodeOut_a_bits_opcode = nodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign x1_nodeOut_a_bits_opcode = nodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign x1_nodeOut_a_bits_address = nodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_a_bits_data = nodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign x1_nodeOut_a_bits_data = nodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] wire _nodeIn_d_valid_T; // @[BusBypass.scala:96:24] assign auto_in_d_valid_0 = nodeIn_d_valid; // @[BusBypass.scala:66:9] wire [2:0] _nodeIn_d_bits_T_opcode; // @[BusBypass.scala:98:21] assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[BusBypass.scala:66:9] wire [1:0] _nodeIn_d_bits_T_param; // @[BusBypass.scala:98:21] assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[BusBypass.scala:66:9] wire [1:0] _nodeIn_d_bits_T_size; // @[BusBypass.scala:98:21] assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[BusBypass.scala:66:9] wire _nodeIn_d_bits_T_source; // @[BusBypass.scala:98:21] assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[BusBypass.scala:66:9] wire _nodeIn_d_bits_T_sink; // @[BusBypass.scala:98:21] assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[BusBypass.scala:66:9] wire _nodeIn_d_bits_T_denied; // @[BusBypass.scala:98:21] assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[BusBypass.scala:66:9] wire [31:0] _nodeIn_d_bits_T_data; // @[BusBypass.scala:98:21] assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[BusBypass.scala:66:9] wire _nodeIn_d_bits_T_corrupt; // @[BusBypass.scala:98:21] assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[BusBypass.scala:66:9] wire _nodeOut_a_valid_T_2; // @[BusBypass.scala:88:42] assign auto_out_0_a_valid_0 = nodeOut_a_valid; // @[BusBypass.scala:66:9] assign auto_out_0_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[BusBypass.scala:66:9] assign auto_out_0_a_bits_address_0 = nodeOut_a_bits_address; // @[BusBypass.scala:66:9] assign auto_out_0_a_bits_data_0 = nodeOut_a_bits_data; // @[BusBypass.scala:66:9] wire _nodeOut_d_ready_T; // @[BusBypass.scala:94:32] assign auto_out_0_d_ready_0 = nodeOut_d_ready; // @[BusBypass.scala:66:9] wire [2:0] nodeIn_d_bits_out_opcode = nodeOut_d_bits_opcode; // @[BusBypass.scala:97:53] wire [1:0] nodeIn_d_bits_out_param = nodeOut_d_bits_param; // @[BusBypass.scala:97:53] wire [1:0] nodeIn_d_bits_out_size = nodeOut_d_bits_size; // @[BusBypass.scala:97:53] wire nodeIn_d_bits_out_denied = nodeOut_d_bits_denied; // @[BusBypass.scala:97:53] wire nodeIn_d_bits_out_corrupt = nodeOut_d_bits_corrupt; // @[BusBypass.scala:97:53] wire _nodeOut_a_valid_T_6; // @[BusBypass.scala:89:42] assign auto_out_1_a_valid_0 = x1_nodeOut_a_valid; // @[BusBypass.scala:66:9] assign auto_out_1_a_bits_opcode_0 
= x1_nodeOut_a_bits_opcode; // @[BusBypass.scala:66:9] assign auto_out_1_a_bits_address_0 = x1_nodeOut_a_bits_address; // @[BusBypass.scala:66:9] assign auto_out_1_a_bits_data_0 = x1_nodeOut_a_bits_data; // @[BusBypass.scala:66:9] wire _nodeOut_d_ready_T_2; // @[BusBypass.scala:95:32] assign auto_out_1_d_ready_0 = x1_nodeOut_d_ready; // @[BusBypass.scala:66:9] wire [2:0] nodeIn_d_bits_out_1_opcode = x1_nodeOut_d_bits_opcode; // @[BusBypass.scala:97:53] wire [1:0] nodeIn_d_bits_out_1_param = x1_nodeOut_d_bits_param; // @[BusBypass.scala:97:53] wire [1:0] nodeIn_d_bits_out_1_size = x1_nodeOut_d_bits_size; // @[BusBypass.scala:97:53] wire nodeIn_d_bits_out_1_source = x1_nodeOut_d_bits_source; // @[BusBypass.scala:97:53] wire nodeIn_d_bits_out_1_sink = x1_nodeOut_d_bits_sink; // @[BusBypass.scala:97:53] wire nodeIn_d_bits_out_1_denied = x1_nodeOut_d_bits_denied; // @[BusBypass.scala:97:53] wire [31:0] nodeIn_d_bits_out_1_data = x1_nodeOut_d_bits_data; // @[BusBypass.scala:97:53] wire nodeIn_d_bits_out_1_corrupt = x1_nodeOut_d_bits_corrupt; // @[BusBypass.scala:97:53] reg in_reset; // @[BusBypass.scala:79:27] reg bypass_reg; // @[BusBypass.scala:80:25] wire bypass = in_reset ? io_bypass_0 : bypass_reg; // @[BusBypass.scala:66:9, :79:27, :80:25, :81:21] reg [1:0] flight; // @[Edges.scala:295:25] wire _T = nodeIn_a_ready & nodeIn_a_valid; // @[Decoupled.scala:51:35] wire r_3; // @[Edges.scala:233:22] assign r_3 = _T; // @[Decoupled.scala:51:35] wire _a_inc_T; // @[Decoupled.scala:51:35] assign _a_inc_T = _T; // @[Decoupled.scala:51:35] wire _a_dec_T; // @[Decoupled.scala:51:35] assign _a_dec_T = _T; // @[Decoupled.scala:51:35] wire _stall_T_1; // @[Decoupled.scala:51:35] assign _stall_T_1 = _T; // @[Decoupled.scala:51:35] wire _r_beats1_opdata_T = nodeIn_a_bits_opcode[2]; // @[Edges.scala:92:37] wire _stall_beats1_opdata_T = nodeIn_a_bits_opcode[2]; // @[Edges.scala:92:37] wire r_beats1_opdata = ~_r_beats1_opdata_T; // @[Edges.scala:92:{28,37}] reg r_counter; // @[Edges.scala:229:27] wire _r_last_T = r_counter; // @[Edges.scala:229:27, :232:25] wire [1:0] _r_counter1_T = {1'h0, r_counter} - 2'h1; // @[Edges.scala:229:27, :230:28] wire r_counter1 = _r_counter1_T[0]; // @[Edges.scala:230:28] wire a_first = ~r_counter; // @[Edges.scala:229:27, :231:25] wire _r_count_T = ~r_counter1; // @[Edges.scala:230:28, :234:27] wire _r_counter_T = ~a_first & r_counter1; // @[Edges.scala:230:28, :231:25, :236:21] wire _T_3 = nodeIn_d_ready & nodeIn_d_valid; // @[Decoupled.scala:51:35] wire r_3_3; // @[Edges.scala:233:22] assign r_3_3 = _T_3; // @[Decoupled.scala:51:35] wire _d_inc_T; // @[Decoupled.scala:51:35] assign _d_inc_T = _T_3; // @[Decoupled.scala:51:35] wire _d_dec_T; // @[Decoupled.scala:51:35] assign _d_dec_T = _T_3; // @[Decoupled.scala:51:35] wire [4:0] _r_beats1_decode_T_9 = 5'h3 << nodeIn_d_bits_size; // @[package.scala:243:71] wire [1:0] _r_beats1_decode_T_10 = _r_beats1_decode_T_9[1:0]; // @[package.scala:243:{71,76}] wire [1:0] _r_beats1_decode_T_11 = ~_r_beats1_decode_T_10; // @[package.scala:243:{46,76}] wire r_beats1_opdata_3 = nodeIn_d_bits_opcode[0]; // @[Edges.scala:106:36] reg r_counter_3; // @[Edges.scala:229:27] wire _r_last_T_6 = r_counter_3; // @[Edges.scala:229:27, :232:25] wire [1:0] _r_counter1_T_3 = {1'h0, r_counter_3} - 2'h1; // @[Edges.scala:229:27, :230:28] wire r_counter1_3 = _r_counter1_T_3[0]; // @[Edges.scala:230:28] wire d_first = ~r_counter_3; // @[Edges.scala:229:27, :231:25] wire _r_count_T_3 = ~r_counter1_3; // @[Edges.scala:230:28, :234:27] wire _r_counter_T_3 = 
~d_first & r_counter1_3; // @[Edges.scala:230:28, :231:25, :236:21] wire d_request = nodeIn_d_bits_opcode[2] & ~(nodeIn_d_bits_opcode[1]); // @[Edges.scala:71:{36,40,43,52}] wire _a_inc_T_1 = _a_inc_T & a_first; // @[Decoupled.scala:51:35] wire a_inc = _a_inc_T_1; // @[Edges.scala:310:{26,37}] wire _d_inc_T_1 = _d_inc_T & d_first; // @[Decoupled.scala:51:35] wire d_inc = _d_inc_T_1 & d_request; // @[Edges.scala:71:40, :313:{26,37}] wire [1:0] inc = {a_inc, d_inc}; // @[Edges.scala:310:37, :313:37, :315:18] wire _a_dec_T_1 = _a_dec_T; // @[Decoupled.scala:51:35] wire _d_dec_T_1 = _d_dec_T; // @[Decoupled.scala:51:35] wire d_dec = _d_dec_T_1; // @[Edges.scala:320:{26,36}] wire [1:0] dec = {1'h0, d_dec}; // @[Edges.scala:320:36, :322:18] wire _next_flight_T = inc[0]; // @[Edges.scala:315:18, :324:40] wire _next_flight_T_1 = inc[1]; // @[Edges.scala:315:18, :324:40] wire [1:0] _next_flight_T_2 = {1'h0, _next_flight_T} + {1'h0, _next_flight_T_1}; // @[Edges.scala:324:40] wire [1:0] _next_flight_T_3 = _next_flight_T_2; // @[Edges.scala:324:40] wire [2:0] _next_flight_T_4 = {1'h0, flight} + {1'h0, _next_flight_T_3}; // @[Edges.scala:295:25, :324:{30,40}] wire [1:0] _next_flight_T_5 = _next_flight_T_4[1:0]; // @[Edges.scala:324:30] wire _next_flight_T_6 = dec[0]; // @[Edges.scala:322:18, :324:56] wire _next_flight_T_7 = dec[1]; // @[Edges.scala:322:18, :324:56] wire [1:0] _next_flight_T_8 = {1'h0, _next_flight_T_6} + {1'h0, _next_flight_T_7}; // @[Edges.scala:324:56] wire [1:0] _next_flight_T_9 = _next_flight_T_8; // @[Edges.scala:324:56] wire [2:0] _next_flight_T_10 = {1'h0, _next_flight_T_5} - {1'h0, _next_flight_T_9}; // @[Edges.scala:324:{30,46,56}] wire [1:0] next_flight = _next_flight_T_10[1:0]; // @[Edges.scala:324:46] assign _io_pending_T = |flight; // @[Edges.scala:295:25] assign io_pending = _io_pending_T; // @[BusBypass.scala:66:9, :84:27] wire _stall_T = bypass != io_bypass_0; // @[BusBypass.scala:66:9, :81:21, :86:25] wire stall_done = _stall_T_1; // @[Decoupled.scala:51:35] wire stall_beats1_opdata = ~_stall_beats1_opdata_T; // @[Edges.scala:92:{28,37}] reg stall_counter; // @[Edges.scala:229:27] wire _stall_last_T = stall_counter; // @[Edges.scala:229:27, :232:25] wire [1:0] _stall_counter1_T = {1'h0, stall_counter} - 2'h1; // @[Edges.scala:229:27, :230:28] wire stall_counter1 = _stall_counter1_T[0]; // @[Edges.scala:230:28] wire stall_first = ~stall_counter; // @[Edges.scala:229:27, :231:25] wire _stall_count_T = ~stall_counter1; // @[Edges.scala:230:28, :234:27] wire _stall_counter_T = ~stall_first & stall_counter1; // @[Edges.scala:230:28, :231:25, :236:21] wire stall = _stall_T & stall_first; // @[Edges.scala:231:25] wire _nodeOut_a_valid_T = ~stall; // @[BusBypass.scala:86:40, :88:21] wire _nodeOut_a_valid_T_1 = _nodeOut_a_valid_T & nodeIn_a_valid; // @[BusBypass.scala:88:{21,28}] assign _nodeOut_a_valid_T_2 = _nodeOut_a_valid_T_1 & bypass; // @[BusBypass.scala:81:21, :88:{28,42}] assign nodeOut_a_valid = _nodeOut_a_valid_T_2; // @[BusBypass.scala:88:42] wire _nodeOut_a_valid_T_3 = ~stall; // @[BusBypass.scala:86:40, :88:21, :89:21] wire _nodeOut_a_valid_T_4 = _nodeOut_a_valid_T_3 & nodeIn_a_valid; // @[BusBypass.scala:89:{21,28}] wire _nodeOut_a_valid_T_5 = ~bypass; // @[BusBypass.scala:81:21, :89:45] assign _nodeOut_a_valid_T_6 = _nodeOut_a_valid_T_4 & _nodeOut_a_valid_T_5; // @[BusBypass.scala:89:{28,42,45}] assign x1_nodeOut_a_valid = _nodeOut_a_valid_T_6; // @[BusBypass.scala:89:42] wire _nodeIn_a_ready_T = ~stall; // @[BusBypass.scala:86:40, :88:21, :90:21] wire 
_nodeIn_a_ready_T_1 = bypass ? nodeOut_a_ready : x1_nodeOut_a_ready; // @[BusBypass.scala:81:21, :90:34] assign _nodeIn_a_ready_T_2 = _nodeIn_a_ready_T & _nodeIn_a_ready_T_1; // @[BusBypass.scala:90:{21,28,34}] assign nodeIn_a_ready = _nodeIn_a_ready_T_2; // @[BusBypass.scala:90:28] assign nodeOut_a_bits_address = {119'h0, nodeIn_a_bits_address}; // @[BusBypass.scala:91:18] assign _nodeOut_d_ready_T = nodeIn_d_ready & bypass; // @[BusBypass.scala:81:21, :94:32] assign nodeOut_d_ready = _nodeOut_d_ready_T; // @[BusBypass.scala:94:32] wire _nodeOut_d_ready_T_1 = ~bypass; // @[BusBypass.scala:81:21, :89:45, :95:35] assign _nodeOut_d_ready_T_2 = nodeIn_d_ready & _nodeOut_d_ready_T_1; // @[BusBypass.scala:95:{32,35}] assign x1_nodeOut_d_ready = _nodeOut_d_ready_T_2; // @[BusBypass.scala:95:32] assign _nodeIn_d_valid_T = bypass ? nodeOut_d_valid : x1_nodeOut_d_valid; // @[BusBypass.scala:81:21, :96:24] assign nodeIn_d_valid = _nodeIn_d_valid_T; // @[BusBypass.scala:96:24] assign _nodeIn_d_bits_T_opcode = bypass ? nodeIn_d_bits_out_opcode : nodeIn_d_bits_out_1_opcode; // @[BusBypass.scala:81:21, :97:53, :98:21] assign _nodeIn_d_bits_T_param = bypass ? nodeIn_d_bits_out_param : nodeIn_d_bits_out_1_param; // @[BusBypass.scala:81:21, :97:53, :98:21] assign _nodeIn_d_bits_T_size = bypass ? nodeIn_d_bits_out_size : nodeIn_d_bits_out_1_size; // @[BusBypass.scala:81:21, :97:53, :98:21] assign _nodeIn_d_bits_T_source = ~bypass & nodeIn_d_bits_out_1_source; // @[BusBypass.scala:81:21, :97:53, :98:21] assign _nodeIn_d_bits_T_sink = ~bypass & nodeIn_d_bits_out_1_sink; // @[BusBypass.scala:81:21, :97:53, :98:21] assign _nodeIn_d_bits_T_denied = bypass ? nodeIn_d_bits_out_denied : nodeIn_d_bits_out_1_denied; // @[BusBypass.scala:81:21, :97:53, :98:21] assign _nodeIn_d_bits_T_data = bypass ? 32'h0 : nodeIn_d_bits_out_1_data; // @[BusBypass.scala:81:21, :97:53, :98:21] assign _nodeIn_d_bits_T_corrupt = bypass ? 
nodeIn_d_bits_out_corrupt : nodeIn_d_bits_out_1_corrupt; // @[BusBypass.scala:81:21, :97:53, :98:21]
  assign nodeIn_d_bits_opcode = _nodeIn_d_bits_T_opcode; // @[BusBypass.scala:98:21]
  assign nodeIn_d_bits_param = _nodeIn_d_bits_T_param; // @[BusBypass.scala:98:21]
  assign nodeIn_d_bits_size = _nodeIn_d_bits_T_size; // @[BusBypass.scala:98:21]
  assign nodeIn_d_bits_source = _nodeIn_d_bits_T_source; // @[BusBypass.scala:98:21]
  assign nodeIn_d_bits_sink = _nodeIn_d_bits_T_sink; // @[BusBypass.scala:98:21]
  assign nodeIn_d_bits_denied = _nodeIn_d_bits_T_denied; // @[BusBypass.scala:98:21]
  assign nodeIn_d_bits_data = _nodeIn_d_bits_T_data; // @[BusBypass.scala:98:21]
  assign nodeIn_d_bits_corrupt = _nodeIn_d_bits_T_corrupt; // @[BusBypass.scala:98:21]
  always @(posedge clock) begin // @[BusBypass.scala:66:9]
    if (reset) begin // @[BusBypass.scala:66:9]
      in_reset <= 1'h1; // @[BusBypass.scala:79:27]
      flight <= 2'h0; // @[Edges.scala:295:25]
      r_counter <= 1'h0; // @[Edges.scala:229:27]
      r_counter_3 <= 1'h0; // @[Edges.scala:229:27]
      stall_counter <= 1'h0; // @[Edges.scala:229:27]
    end else begin // @[BusBypass.scala:66:9]
      in_reset <= 1'h0; // @[BusBypass.scala:79:27]
      flight <= next_flight; // @[Edges.scala:295:25, :324:46]
      if (_T) // @[Decoupled.scala:51:35]
        r_counter <= _r_counter_T; // @[Edges.scala:229:27, :236:21]
      if (_T_3) // @[Decoupled.scala:51:35]
        r_counter_3 <= _r_counter_T_3; // @[Edges.scala:229:27, :236:21]
      if (_stall_T_1) // @[Decoupled.scala:51:35]
        stall_counter <= _stall_counter_T; // @[Edges.scala:229:27, :236:21]
    end
    if (in_reset | next_flight == 2'h0) // @[Edges.scala:324:46]
      bypass_reg <= io_bypass_0; // @[BusBypass.scala:66:9, :80:25]
  end // always @(posedge)
  TLMonitor_34 monitor ( // @[Nodes.scala:27:25]
    .clock (clock),
    .reset (reset),
    .io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17]
    .io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
    .io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
    .io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
    .io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
    .io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
    .io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17]
    .io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
    .io_in_d_bits_param (nodeIn_d_bits_param), // @[MixedNode.scala:551:17]
    .io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17]
    .io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17]
    .io_in_d_bits_sink (nodeIn_d_bits_sink), // @[MixedNode.scala:551:17]
    .io_in_d_bits_denied (nodeIn_d_bits_denied), // @[MixedNode.scala:551:17]
    .io_in_d_bits_data (nodeIn_d_bits_data), // @[MixedNode.scala:551:17]
    .io_in_d_bits_corrupt (nodeIn_d_bits_corrupt) // @[MixedNode.scala:551:17]
  ); // @[Nodes.scala:27:25]
  assign auto_in_a_ready = auto_in_a_ready_0; // @[BusBypass.scala:66:9]
  assign auto_in_d_valid = auto_in_d_valid_0; // @[BusBypass.scala:66:9]
  assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[BusBypass.scala:66:9]
  assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[BusBypass.scala:66:9]
  assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[BusBypass.scala:66:9]
  assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[BusBypass.scala:66:9]
  assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[BusBypass.scala:66:9]
  assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[BusBypass.scala:66:9]
  assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[BusBypass.scala:66:9]
  assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[BusBypass.scala:66:9]
  assign auto_out_1_a_valid = auto_out_1_a_valid_0; // @[BusBypass.scala:66:9]
  assign auto_out_1_a_bits_opcode = auto_out_1_a_bits_opcode_0; // @[BusBypass.scala:66:9]
  assign auto_out_1_a_bits_address = auto_out_1_a_bits_address_0; // @[BusBypass.scala:66:9]
  assign auto_out_1_a_bits_data = auto_out_1_a_bits_data_0; // @[BusBypass.scala:66:9]
  assign auto_out_1_d_ready = auto_out_1_d_ready_0; // @[BusBypass.scala:66:9]
  assign auto_out_0_a_valid = auto_out_0_a_valid_0; // @[BusBypass.scala:66:9]
  assign auto_out_0_a_bits_opcode = auto_out_0_a_bits_opcode_0; // @[BusBypass.scala:66:9]
  assign auto_out_0_a_bits_address = auto_out_0_a_bits_address_0; // @[BusBypass.scala:66:9]
  assign auto_out_0_a_bits_data = auto_out_0_a_bits_data_0; // @[BusBypass.scala:66:9]
  assign auto_out_0_d_ready = auto_out_0_d_ready_0; // @[BusBypass.scala:66:9]
endmodule
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
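    // Worked example of the per-source lane packing used above (an illustrative sketch,
    // not additional checker logic): with log_a_opcode_bus_size = 2, a Get (opcode 4)
    // accepted from source 2 is recorded as a_opcodes_set_interm = (4 << 1) | 1 = 0b1001,
    // shifted to bit offset (2 << 2) = 8 of inflight_opcodes. When the matching D beat
    // arrives, a_opcode_lookup = ((inflight_opcodes >> 8) & 0xF) >> 1 = 4 recovers the
    // opcode; a lane value of 0 means no request is outstanding for that source. The
    // inflight_sizes register uses the same scheme with (sizeBits + 1)-bit lanes.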
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
  }

  // HeterogeneousBag moved to standalone diplomacy
  @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
  def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
  @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
  val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}

File Parameters.scala:

// See LICENSE.SiFive for license details.

package freechips.rocketchip.diplomacy

import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}

import freechips.rocketchip.util.ShiftQueue

/** Options for describing the attributes of memory regions */
object RegionType {
  // Define the 'more relaxed than' ordering
  val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
  sealed trait T extends Ordered[T] {
    def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
  }

  case object CACHED      extends T // an intermediate agent may have cached a copy of the region for you
  case object TRACKED     extends T // the region may have been cached by another master, but coherence is being provided
  case object UNCACHED    extends T // the region has not been cached yet, but should be cached when possible
  case object IDEMPOTENT  extends T // gets return most recently put content, but content should not be cached
  case object VOLATILE    extends T // content may change without a put, but puts and gets have no side effects
  case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
  case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}

// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
  require (start >= 0, s"Ids cannot be negative, but got: $start.")
  require (start <= end, "Id ranges cannot be negative.")

  def compare(x: IdRange) = {
    val primary   = (this.start - x.start).signum
    val secondary = (x.end - this.end).signum
    if (primary != 0) primary else secondary
  }

  def overlaps(x: IdRange) = start < x.end && x.start < end
  def contains(x: IdRange) = start <= x.start && x.end <= end

  def contains(x: Int)  = start <= x && x < end
  def contains(x: UInt) =
    if (size == 0) {
      false.B
    } else if (size == 1) { // simple comparison
      x === start.U
    } else {
      // find index of largest different bit
      val largestDeltaBit = log2Floor(start ^ (end-1))
      val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
      val uncommonMask = (1 << smallestCommonBit) - 1
      val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
      // the prefix must match exactly (note: may shift ALL bits away)
      (x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
      // firrtl constant prop range analysis can eliminate these two:
      (start & uncommonMask).U <= uncommonBits &&
      uncommonBits <= ((end-1) & uncommonMask).U
    }

  def shift(x: Int) = IdRange(start+x, end+x)
  def size = end - start
  def isEmpty = end == start

  def range = start until end
}

object IdRange
{
  def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
    val ranges = s.sorted
    (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
  }
}

// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
  def this(x: Int) = this(x, x)

  require (min <= max, s"Min transfer $min > max transfer $max")
  require (min >= 0 && max >= 0, s"TransferSizes must be non-negative, got: ($min, $max)")
  require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
  require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
  require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")

  def none = min == 0
  def contains(x: Int) = isPow2(x) && min <= x && x <= max
  def containsLg(x: Int) = contains(1 << x)
  def containsLg(x: UInt) =
    if (none) false.B
    else if (min == max) { log2Ceil(min).U === x }
    else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }

  def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)

  def intersect(x: TransferSizes) =
    if (x.max < min || max < x.min) TransferSizes.none
    else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))

  // Not a union, because the result may contain sizes contained by neither term
  // NOT TO BE CONFUSED WITH COVERPOINTS
  def mincover(x: TransferSizes) = {
    if (none) {
      x
    } else if (x.none) {
      this
    } else {
      TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
    }
  }

  override def toString() = "TransferSizes[%d, %d]".format(min, max)
}

object TransferSizes {
  def apply(x: Int) = new TransferSizes(x)
  val none = new TransferSizes(0)

  def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
  def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)

  implicit def asBool(x: TransferSizes) = !x.none
}

// AddressSets specify the address space managed by the manager
// Base is the base address, and mask gives the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
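        // (hasData() below uses this result to constant-fold the data-presence check: if no
        // visible manager supports Put or atomic operations, an A beat can never carry data,
        // so Some(false) is returned; if none supports AcquireB, Get, or Hint, every A message
        // carries data, so Some(true); otherwise None leaves the decision to the runtime
        // opcode decode in hasData().)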
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
        // Do there exist A messages without Data?
        val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
      }
      case _:TLBundleB => {
        // Do there exist B messages with Data?
        val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
        // Do there exist B messages without Data?
        val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
      }
      case _:TLBundleC => {
        // Do there exist C messages with Data?
        val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
        // Do there exist C messages without Data?
        val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
        if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
      }
      case _:TLBundleD => {
        // Do there exist D messages with Data?
        val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
        // Do there exist D messages without Data?
        val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
        if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
      }
      case _:TLBundleE => Some(false)
    }
  }

  def isRequest(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => true.B
      case b: TLBundleB => true.B
      case c: TLBundleC => c.opcode(2) && c.opcode(1)
        // opcode === TLMessages.Release ||
        // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(2) && !d.opcode(1)
        // opcode === TLMessages.Grant ||
        // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
  }

  def isResponse(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => false.B
      case b: TLBundleB => false.B
      case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
        // opcode =/= TLMessages.Release &&
        // opcode =/= TLMessages.ReleaseData
      case d: TLBundleD => true.B // Grant isResponse + isRequest
      case e: TLBundleE => true.B
    }
  }

  def hasData(x: TLChannel): Bool = {
    val opdata = x match {
      case a: TLBundleA => !a.opcode(2)
        // opcode === TLMessages.PutFullData    ||
        // opcode === TLMessages.PutPartialData ||
        // opcode === TLMessages.ArithmeticData ||
        // opcode === TLMessages.LogicalData
      case b: TLBundleB => !b.opcode(2)
        // opcode === TLMessages.PutFullData    ||
        // opcode === TLMessages.PutPartialData ||
        // opcode === TLMessages.ArithmeticData ||
        // opcode === TLMessages.LogicalData
      case c: TLBundleC => c.opcode(0)
        // opcode === TLMessages.AccessAckData ||
        // opcode === TLMessages.ProbeAckData  ||
        // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(0)
        // opcode === TLMessages.AccessAckData ||
        // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
    staticHasData(x).map(_.B).getOrElse(opdata)
  }

  def opcode(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.opcode
      case b: TLBundleB => b.opcode
      case c: TLBundleC => c.opcode
      case d: TLBundleD => d.opcode
    }
  }

  def param(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.param
      case b: TLBundleB => b.param
      case c: TLBundleC => c.param
      case d: TLBundleD =>
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
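As a brief aside before the generated TLMonitor_2 module: here is a minimal, hedged sketch of how the TLEdgeOut request constructors defined above are typically consumed by a client. Only edge.Get and the standard TLBundle channel fields come from the code above; the helper name driveSingleGet, the 0x1000 address, and the lgSize of 3 are illustrative assumptions.

import chisel3._
import freechips.rocketchip.tilelink.{TLBundle, TLEdgeOut}

object TLEdgeUsageSketch {
  // `out` and `edge` are assumed to come from a diplomatic client node,
  // e.g. `val (out, edge) = node.out(0)` inside a LazyModuleImp.
  def driveSingleGet(out: TLBundle, edge: TLEdgeOut): Unit = {
    // Get returns (legal, bits): `legal` is true only when a visible manager
    // can service a read of this address and size. (Get also requires, at
    // elaboration time, that at least one visible manager supports Get.)
    val (legal, get) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 3.U) // 8-byte read
    out.a.valid := legal
    out.a.bits  := get
    out.d.ready := true.B // unconditionally accept the AccessAckData response
  }
}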
module TLMonitor_2( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [7:0] io_in_d_bits_source // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71] wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg [2:0] a_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [7:0] source; // @[Monitor.scala:390:22] reg [28:0] address; // @[Monitor.scala:391:22] reg [2:0] d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [7:0] source_1; // @[Monitor.scala:541:22] reg [128:0] inflight; // @[Monitor.scala:614:27] reg [515:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [515:0] inflight_sizes; // @[Monitor.scala:618:33] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _GEN_0 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35] wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46] wire _GEN_1 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [128:0] inflight_1; // @[Monitor.scala:726:35] reg [515:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files. File InputUnit.scala: package constellation.router import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.util._ import constellation.channel._ import constellation.routing.{FlowRoutingBundle} import constellation.noc.{HasNoCParams} class AbstractInputUnitIO( val cParam: BaseChannelParams, val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams], )(implicit val p: Parameters) extends Bundle with HasRouterOutputParams { val nodeId = cParam.destId val router_req = Decoupled(new RouteComputerReq) val router_resp = Input(new RouteComputerResp(outParams, egressParams)) val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams)) val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams)) val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })) val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams))) val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams))) val debug = Output(new Bundle { val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W) val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W) }) val block = Input(Bool()) } abstract class AbstractInputUnit( val cParam: BaseChannelParams, val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams] )(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams { val nodeId = cParam.destId def io: AbstractInputUnitIO } class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module { val nVirtualChannels = cParam.nVirtualChannels val io = IO(new Bundle { val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits)))) val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits))) }) val useOutputQueues = cParam.useOutputQueues val delims = if (useOutputQueues) { cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_) } else { // If no queuing, have to add an additional slot since head == tail implies empty // TODO this should be fixed, should use all slots available cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_) } val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) => if (cParam.virtualChannelParams(i).traversable) s else 0 } val ends = delims.tail.zipWithIndex.map { case (s,i) => if (cParam.virtualChannelParams(i).traversable) s else 0 } val fullSize = delims.last // Ugly case. 
Use multiple queues if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) { require(useOutputQueues) val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize))) qs.zipWithIndex.foreach { case (q,i) => val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U) q.io.enq.valid := sel.orR q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head)) q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail)) q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload)) io.deq(i) <> q.io.deq } } else { val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits)) val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W)))) val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W)))) val empty = (heads zip tails).map(t => t._1 === t._2) val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) } qs.foreach(_.io.enq.valid := false.B) qs.foreach(_.io.enq.bits := DontCare) val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id) val flit = Wire(new BaseFlit(cParam.payloadBits)) val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B flit.head := io.enq(0).bits.head flit.tail := io.enq(0).bits.tail flit.payload := io.enq(0).bits.payload when (io.enq(0).valid && !direct_to_q) { val tail = tails(io.enq(0).bits.virt_channel_id) mem.write(tail, flit) tails(io.enq(0).bits.virt_channel_id) := Mux( tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(vc_sel, starts.map(_.U)), tail + 1.U) } .elsewhen (io.enq(0).valid && direct_to_q) { for (i <- 0 until nVirtualChannels) { when (io.enq(0).bits.virt_channel_id === i.U) { qs(i).io.enq.valid := true.B qs(i).io.enq.bits := flit } } } if (useOutputQueues) { val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready } val to_q_oh = PriorityEncoderOH(can_to_q) val to_q = OHToUInt(to_q_oh) when (can_to_q.orR) { val head = Mux1H(to_q_oh, heads) heads(to_q) := Mux( head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(to_q_oh, starts.map(_.U)), head + 1.U) for (i <- 0 until nVirtualChannels) { when (to_q_oh(i)) { qs(i).io.enq.valid := true.B qs(i).io.enq.bits := mem.read(head) } } } for (i <- 0 until nVirtualChannels) { io.deq(i) <> qs(i).io.deq } } else { qs.map(_.io.deq.ready := false.B) val ready_sel = io.deq.map(_.ready) val fire = io.deq.map(_.fire) assert(PopCount(fire) <= 1.U) val head = Mux1H(fire, heads) when (fire.orR) { val fire_idx = OHToUInt(fire) heads(fire_idx) := Mux( head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(fire, starts.map(_.U)), head + 1.U) } val read_flit = mem.read(head) for (i <- 0 until nVirtualChannels) { io.deq(i).valid := !empty(i) io.deq(i).bits := read_flit } } } } class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams], egressParams: Seq[EgressChannelParams], combineRCVA: Boolean, combineSAST: Boolean ) (implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) { val nVirtualChannels = cParam.nVirtualChannels val virtualChannelParams = cParam.virtualChannelParams class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) { val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams])) } val io = IO(new InputUnitIO) val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5) class InputState extends Bundle { val g = UInt(3.W) val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) 
}) val flow = new FlowRoutingBundle val fifo_deps = UInt(nVirtualChannels.W) } val input_buffer = Module(new InputBuffer(cParam)) for (i <- 0 until cParam.srcSpeedup) { input_buffer.io.enq(i) := io.in.flit(i) } input_buffer.io.deq.foreach(_.ready := false.B) val route_arbiter = Module(new Arbiter( new RouteComputerReq, nVirtualChannels )) io.router_req <> route_arbiter.io.out val states = Reg(Vec(nVirtualChannels, new InputState)) val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_) val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_) if (anyFifo) { val idle_mask = VecInit(states.map(_.g === g_i)).asUInt for (s <- states) for (i <- 0 until nVirtualChannels) s.fifo_deps := s.fifo_deps & ~idle_mask } for (i <- 0 until cParam.srcSpeedup) { when (io.in.flit(i).fire && io.in.flit(i).bits.head) { val id = io.in.flit(i).bits.virt_channel_id assert(id < nVirtualChannels.U) assert(states(id).g === g_i) val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U states(id).g := Mux(at_dest, g_v, g_r) states(id).vc_sel.foreach(_.foreach(_ := false.B)) for (o <- 0 until nEgress) { when (o.U === io.in.flit(i).bits.flow.egress_node_id) { states(id).vc_sel(o+nOutputs)(0) := true.B } } states(id).flow := io.in.flit(i).bits.flow if (anyFifo) { val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) => s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id }).asUInt } } } (route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) => if (virtualChannelParams(idx).traversable) { i.valid := s.g === g_r i.bits.flow := s.flow i.bits.src_virt_id := idx.U when (i.fire) { s.g := g_v } } else { i.valid := false.B i.bits := DontCare } } when (io.router_req.fire) { val id = io.router_req.bits.src_virt_id assert(states(id).g === g_r) states(id).g := g_v for (i <- 0 until nVirtualChannels) { when (i.U === id) { states(i).vc_sel := io.router_resp.vc_sel } } } val mask = RegInit(0.U(nVirtualChannels.W)) val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams))) val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool())) val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask)) val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels) // Prioritize incoming packetes when (io.router_req.fire) { mask := (1.U << io.router_req.bits.src_virt_id) - 1.U } .elsewhen (vcalloc_vals.orR) { mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) }) } io.vcalloc_req.valid := vcalloc_vals.orR io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs) states.zipWithIndex.map { case (s,idx) => if (virtualChannelParams(idx).traversable) { vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U vcalloc_reqs(idx).in_vc := idx.U vcalloc_reqs(idx).vc_sel := s.vc_sel vcalloc_reqs(idx).flow := s.flow when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a } if (combineRCVA) { when (route_arbiter.io.in(idx).fire) { vcalloc_vals(idx) := true.B vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel } } } else { vcalloc_vals(idx) := false.B vcalloc_reqs(idx) := DontCare } } io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready when (io.vcalloc_req.fire) { for (i <- 0 until nVirtualChannels) { when (vcalloc_sel(i)) { states(i).vc_sel := io.vcalloc_resp.vc_sel states(i).g := g_a if (!combineRCVA) { assert(states(i).g === g_v) } } } } val 
salloc_arb = Module(new SwitchArbiter( nVirtualChannels, cParam.destSpeedup, outParams, egressParams )) (states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) => if (virtualChannelParams(i).traversable) { val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid r.bits.vc_sel := s.vc_sel val deq_tail = input_buffer.io.deq(i).bits.tail r.bits.tail := deq_tail when (r.fire && deq_tail) { s.g := g_i } input_buffer.io.deq(i).ready := r.ready } else { r.valid := false.B r.bits := DontCare } } io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready)) io.salloc_req <> salloc_arb.io.out when (io.block) { salloc_arb.io.out.foreach(_.ready := false.B) io.salloc_req.foreach(_.valid := false.B) } class OutBundle extends Bundle { val valid = Bool() val vid = UInt(virtualChannelBits.W) val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W) val flit = new Flit(cParam.payloadBits) } val salloc_outs = if (combineSAST) { Wire(Vec(cParam.destSpeedup, new OutBundle)) } else { Reg(Vec(cParam.destSpeedup, new OutBundle)) } io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) => Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U) }.reduce(_|_) io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) => Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)), salloc_arb.io.chosen_oh(i), 0.U) }.reduce(_|_) for (i <- 0 until cParam.destSpeedup) { val salloc_out = salloc_outs(i) salloc_out.valid := salloc_arb.io.out(i).fire salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i)) val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel)) val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq) when (salloc_arb.io.out(i).fire) { salloc_out.out_vid := virt_channel salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload)) salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head)) salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)) salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow)) } .otherwise { salloc_out.out_vid := DontCare salloc_out.flit := DontCare } salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch io.out(i).valid := salloc_out.valid io.out(i).bits.flit := salloc_out.flit io.out(i).bits.out_virt_channel := salloc_out.out_vid } def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = { if (virtualChannelParams(srcV).traversable) { outParams.zipWithIndex.map { case (oP, oI) => (0 until oP.nVirtualChannels).map { oV => var allow = false virtualChannelParams(srcV).possibleFlows.foreach { pI => allow = allow || routingRelation( cParam.channelRoutingInfos(srcV), oP.channelRoutingInfos(oV), pI ) } if (!allow) sel(oI)(oV) := false.B } } } } (0 until nVirtualChannels).map { i => if (!virtualChannelParams(i).traversable) states(i) := DontCare filterVCSel(states(i).vc_sel, i) } when (reset.asBool) { states.foreach(_.g := g_i) } }
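A quick aside before the generated InputUnit_6 module: the mask / vcalloc_filter / vcalloc_sel trio in InputUnit implements a rotating-priority (round-robin) one-hot select. The standalone module below is a hypothetical, simplified restatement of just that trick; the module name, ports, and the fire-gated mask update are assumptions (InputUnit itself updates its mask slightly differently, as shown above).

import chisel3._
import chisel3.util._

// Duplicate the request vector, mask off already-served requests in the low
// copy, priority-encode, then fold the two halves back together so that
// priority rotates past the most recent winner.
class RoundRobinSelect(n: Int) extends Module {
  val io = IO(new Bundle {
    val reqs = Input(UInt(n.W))
    val fire = Input(Bool())     // the selected request was accepted this cycle
    val sel  = Output(UInt(n.W)) // one-hot grant
  })
  val mask   = RegInit(0.U(n.W))
  val filter = PriorityEncoderOH(Cat(io.reqs, io.reqs & ~mask))
  io.sel := filter(n - 1, 0) | (filter >> n)
  when (io.fire && io.reqs.orR) {
    // Everything up to and including the winner is deprioritized next round
    mask := Mux1H(io.sel, (0 until n).map { w => ~(0.U((w + 1).W)) })
  }
}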
module InputUnit_6( // @[InputUnit.scala:158:7] input clock, // @[InputUnit.scala:158:7] input reset, // @[InputUnit.scala:158:7] input io_vcalloc_req_ready, // @[InputUnit.scala:170:14] output io_vcalloc_req_valid, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_5_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_4_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_2_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_0, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_5_0, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_4_0, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_3_0, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_2_0, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_1_0, // @[InputUnit.scala:170:14] input io_out_credit_available_5_0, // @[InputUnit.scala:170:14] input io_out_credit_available_4_0, // @[InputUnit.scala:170:14] input io_out_credit_available_3_0, // @[InputUnit.scala:170:14] input io_out_credit_available_2_0, // @[InputUnit.scala:170:14] input io_out_credit_available_1_0, // @[InputUnit.scala:170:14] input io_out_credit_available_0_8, // @[InputUnit.scala:170:14] input io_out_credit_available_0_9, // @[InputUnit.scala:170:14] input io_out_credit_available_0_12, // @[InputUnit.scala:170:14] input io_out_credit_available_0_13, // @[InputUnit.scala:170:14] input io_out_credit_available_0_16, // @[InputUnit.scala:170:14] input io_out_credit_available_0_17, // @[InputUnit.scala:170:14] input io_out_credit_available_0_20, // @[InputUnit.scala:170:14] input io_out_credit_available_0_21, // @[InputUnit.scala:170:14] input io_salloc_req_0_ready, // @[InputUnit.scala:170:14] output io_salloc_req_0_valid, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_5_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_4_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14] output io_out_0_valid, // @[InputUnit.scala:170:14] output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14] output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14] output [72:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14] output [3:0] io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14] output [5:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14] output [2:0] io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14] output [5:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14] output [2:0] io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14] output [4:0] io_debug_va_stall, // @[InputUnit.scala:170:14] output [4:0] io_debug_sa_stall, // @[InputUnit.scala:170:14] input io_in_flit_0_valid, // @[InputUnit.scala:170:14] input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14] input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14] input [72:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14] input [3:0] io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14] input [5:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14] input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14] input [5:0] io_in_flit_0_bits_flow_egress_node, // 
@[InputUnit.scala:170:14] input [2:0] io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14] input [4:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14] output [21:0] io_in_credit_return, // @[InputUnit.scala:170:14] output [21:0] io_in_vc_free // @[InputUnit.scala:170:14] ); wire vcalloc_vals_21; // @[InputUnit.scala:266:32] wire vcalloc_vals_20; // @[InputUnit.scala:266:32] wire vcalloc_vals_19; // @[InputUnit.scala:266:32] wire vcalloc_vals_18; // @[InputUnit.scala:266:32] wire vcalloc_vals_15; // @[InputUnit.scala:266:32] wire vcalloc_vals_14; // @[InputUnit.scala:266:32] wire vcalloc_vals_11; // @[InputUnit.scala:266:32] wire vcalloc_vals_10; // @[InputUnit.scala:266:32] wire vcalloc_vals_3; // @[InputUnit.scala:266:32] wire vcalloc_vals_2; // @[InputUnit.scala:266:32] wire _salloc_arb_io_in_2_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_3_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_10_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_11_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_14_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_15_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_18_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_19_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_20_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_21_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26] wire [21:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26] wire _route_arbiter_io_in_3_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_10_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_11_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_14_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_15_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_18_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_19_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_20_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_21_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29] wire [4:0] _route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29] wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_3_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_3_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_3_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_3_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_4_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_4_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_4_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_5_bits_head; // 
@[InputUnit.scala:181:28] wire _input_buffer_io_deq_5_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_5_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_6_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_6_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_6_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_7_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_7_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_7_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_8_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_8_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_8_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_9_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_9_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_9_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_10_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_10_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_10_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_10_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_11_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_11_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_11_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_11_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_12_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_12_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_12_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_13_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_13_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_13_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_14_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_14_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_14_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_14_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_15_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_15_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_15_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_15_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_16_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_16_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_16_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_17_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_17_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_17_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_18_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_18_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_18_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_18_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_19_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_19_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_19_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] 
_input_buffer_io_deq_19_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_20_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_20_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_20_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_20_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_21_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_21_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_21_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_21_bits_payload; // @[InputUnit.scala:181:28] reg [2:0] states_2_g; // @[InputUnit.scala:192:19] reg states_2_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_2_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_2_vc_sel_3_0; // @[InputUnit.scala:192:19] reg states_2_vc_sel_2_0; // @[InputUnit.scala:192:19] reg states_2_vc_sel_1_0; // @[InputUnit.scala:192:19] reg [3:0] states_2_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_2_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_2_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_2_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_2_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_3_g; // @[InputUnit.scala:192:19] reg states_3_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_3_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_3_vc_sel_3_0; // @[InputUnit.scala:192:19] reg states_3_vc_sel_2_0; // @[InputUnit.scala:192:19] reg states_3_vc_sel_1_0; // @[InputUnit.scala:192:19] reg [3:0] states_3_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_3_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_3_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_3_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_3_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_10_g; // @[InputUnit.scala:192:19] reg states_10_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_10_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_10_vc_sel_3_0; // @[InputUnit.scala:192:19] reg states_10_vc_sel_2_0; // @[InputUnit.scala:192:19] reg states_10_vc_sel_1_0; // @[InputUnit.scala:192:19] reg [3:0] states_10_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_10_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_10_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_10_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_10_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_11_g; // @[InputUnit.scala:192:19] reg states_11_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_11_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_11_vc_sel_3_0; // @[InputUnit.scala:192:19] reg states_11_vc_sel_2_0; // @[InputUnit.scala:192:19] reg states_11_vc_sel_1_0; // @[InputUnit.scala:192:19] reg [3:0] states_11_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_11_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_11_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_11_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_11_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_14_g; // @[InputUnit.scala:192:19] reg states_14_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_14_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_14_vc_sel_3_0; // @[InputUnit.scala:192:19] reg states_14_vc_sel_2_0; // @[InputUnit.scala:192:19] reg 
states_14_vc_sel_1_0; // @[InputUnit.scala:192:19] reg [3:0] states_14_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_14_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_14_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_14_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_14_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_15_g; // @[InputUnit.scala:192:19] reg states_15_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_15_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_15_vc_sel_3_0; // @[InputUnit.scala:192:19] reg states_15_vc_sel_2_0; // @[InputUnit.scala:192:19] reg states_15_vc_sel_1_0; // @[InputUnit.scala:192:19] reg [3:0] states_15_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_15_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_15_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_15_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_15_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_18_g; // @[InputUnit.scala:192:19] reg states_18_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_18_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_18_vc_sel_3_0; // @[InputUnit.scala:192:19] reg states_18_vc_sel_2_0; // @[InputUnit.scala:192:19] reg states_18_vc_sel_1_0; // @[InputUnit.scala:192:19] reg [3:0] states_18_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_18_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_18_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_18_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_18_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_19_g; // @[InputUnit.scala:192:19] reg states_19_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_19_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_19_vc_sel_3_0; // @[InputUnit.scala:192:19] reg states_19_vc_sel_2_0; // @[InputUnit.scala:192:19] reg states_19_vc_sel_1_0; // @[InputUnit.scala:192:19] reg [3:0] states_19_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_19_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_19_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_19_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_19_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_20_g; // @[InputUnit.scala:192:19] reg states_20_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_20_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_20_vc_sel_3_0; // @[InputUnit.scala:192:19] reg states_20_vc_sel_2_0; // @[InputUnit.scala:192:19] reg states_20_vc_sel_1_0; // @[InputUnit.scala:192:19] reg [3:0] states_20_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_20_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_20_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_20_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_20_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_21_g; // @[InputUnit.scala:192:19] reg states_21_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_21_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_21_vc_sel_3_0; // @[InputUnit.scala:192:19] reg states_21_vc_sel_2_0; // @[InputUnit.scala:192:19] reg states_21_vc_sel_1_0; // @[InputUnit.scala:192:19] reg [3:0] states_21_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_21_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_21_flow_ingress_node_id; 
// @[InputUnit.scala:192:19] reg [5:0] states_21_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_21_flow_egress_node_id; // @[InputUnit.scala:192:19] wire _GEN = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30] wire route_arbiter_io_in_2_valid = states_2_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_3_valid = states_3_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_10_valid = states_10_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_11_valid = states_11_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_14_valid = states_14_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_15_valid = states_15_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_18_valid = states_18_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_19_valid = states_19_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_20_valid = states_20_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_21_valid = states_21_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] reg [21:0] mask; // @[InputUnit.scala:250:21] wire [21:0] _vcalloc_filter_T_3 = {vcalloc_vals_21, vcalloc_vals_20, vcalloc_vals_19, vcalloc_vals_18, 2'h0, vcalloc_vals_15, vcalloc_vals_14, 2'h0, vcalloc_vals_11, vcalloc_vals_10, 6'h0, vcalloc_vals_3, vcalloc_vals_2, 2'h0} & ~mask; // @[InputUnit.scala:158:7, :250:21, :253:{80,87,89}, :266:32] wire [43:0] vcalloc_filter = _vcalloc_filter_T_3[0] ? 44'h1 : _vcalloc_filter_T_3[1] ? 44'h2 : _vcalloc_filter_T_3[2] ? 44'h4 : _vcalloc_filter_T_3[3] ? 44'h8 : _vcalloc_filter_T_3[4] ? 44'h10 : _vcalloc_filter_T_3[5] ? 44'h20 : _vcalloc_filter_T_3[6] ? 44'h40 : _vcalloc_filter_T_3[7] ? 44'h80 : _vcalloc_filter_T_3[8] ? 44'h100 : _vcalloc_filter_T_3[9] ? 44'h200 : _vcalloc_filter_T_3[10] ? 44'h400 : _vcalloc_filter_T_3[11] ? 44'h800 : _vcalloc_filter_T_3[12] ? 44'h1000 : _vcalloc_filter_T_3[13] ? 44'h2000 : _vcalloc_filter_T_3[14] ? 44'h4000 : _vcalloc_filter_T_3[15] ? 44'h8000 : _vcalloc_filter_T_3[16] ? 44'h10000 : _vcalloc_filter_T_3[17] ? 44'h20000 : _vcalloc_filter_T_3[18] ? 44'h40000 : _vcalloc_filter_T_3[19] ? 44'h80000 : _vcalloc_filter_T_3[20] ? 44'h100000 : _vcalloc_filter_T_3[21] ? 44'h200000 : vcalloc_vals_2 ? 44'h1000000 : vcalloc_vals_3 ? 44'h2000000 : vcalloc_vals_10 ? 44'h100000000 : vcalloc_vals_11 ? 44'h200000000 : vcalloc_vals_14 ? 44'h1000000000 : vcalloc_vals_15 ? 44'h2000000000 : vcalloc_vals_18 ? 44'h10000000000 : vcalloc_vals_19 ? 44'h20000000000 : vcalloc_vals_20 ? 
44'h40000000000 : {vcalloc_vals_21, 43'h0}; // @[OneHot.scala:85:71] wire [21:0] vcalloc_sel = vcalloc_filter[21:0] | vcalloc_filter[43:22]; // @[Mux.scala:50:70] wire io_vcalloc_req_valid_0 = vcalloc_vals_2 | vcalloc_vals_3 | vcalloc_vals_10 | vcalloc_vals_11 | vcalloc_vals_14 | vcalloc_vals_15 | vcalloc_vals_18 | vcalloc_vals_19 | vcalloc_vals_20 | vcalloc_vals_21; // @[package.scala:81:59] assign vcalloc_vals_2 = states_2_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_3 = states_3_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_10 = states_10_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_11 = states_11_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_14 = states_14_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_15 = states_15_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_18 = states_18_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_19 = states_19_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_20 = states_20_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_21 = states_21_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] wire _GEN_0 = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35] wire _GEN_1 = _GEN_0 & vcalloc_sel[2]; // @[Mux.scala:32:36] wire _GEN_2 = _GEN_0 & vcalloc_sel[3]; // @[Mux.scala:32:36] wire _GEN_3 = _GEN_0 & vcalloc_sel[10]; // @[Mux.scala:32:36] wire _GEN_4 = _GEN_0 & vcalloc_sel[11]; // @[Mux.scala:32:36] wire _GEN_5 = _GEN_0 & vcalloc_sel[14]; // @[Mux.scala:32:36] wire _GEN_6 = _GEN_0 & vcalloc_sel[15]; // @[Mux.scala:32:36] wire _GEN_7 = _GEN_0 & vcalloc_sel[18]; // @[Mux.scala:32:36] wire _GEN_8 = _GEN_0 & vcalloc_sel[19]; // @[Mux.scala:32:36] wire _GEN_9 = _GEN_0 & vcalloc_sel[20]; // @[Mux.scala:32:36] wire _GEN_10 = _GEN_0 & vcalloc_sel[21]; // @[Mux.scala:32:36]
Generate the Verilog code corresponding to the following Chisel files.
File Tile.scala:
// See README.md for license details.
package gemmini

import chisel3._
import chisel3.util._
import Util._

/**
  * A Tile is a purely combinational 2D array of passThrough PEs.
  * The a, b, d, and control inputs are broadcast across the entire array and are passed through to the Tile's outputs.
  * @param inputType The data type (and width) of each PE's input operands
  * @param rows Number of rows of PEs in the Tile
  * @param columns Number of columns of PEs in the Tile
  */
class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module {
  val io = IO(new Bundle {
    val in_a = Input(Vec(rows, inputType))
    val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it
    val in_d = Input(Vec(columns, outputType))
    val in_control = Input(Vec(columns, new PEControl(accType)))
    val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
    val in_last = Input(Vec(columns, Bool()))

    val out_a = Output(Vec(rows, inputType))
    val out_c = Output(Vec(columns, outputType))
    val out_b = Output(Vec(columns, outputType))
    val out_control = Output(Vec(columns, new PEControl(accType)))
    val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
    val out_last = Output(Vec(columns, Bool()))

    val in_valid = Input(Vec(columns, Bool()))
    val out_valid = Output(Vec(columns, Bool()))

    val bad_dataflow = Output(Bool())
  })

  import ev._

  val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls)))
  val tileT = tile.transpose

  // TODO: abstract hori/vert broadcast, all these connections look the same
  // Broadcast 'a' horizontally across the Tile
  for (r <- 0 until rows) {
    tile(r).foldLeft(io.in_a(r)) {
      case (in_a, pe) =>
        pe.io.in_a := in_a
        pe.io.out_a
    }
  }

  // Broadcast 'b' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_b(c)) {
      case (in_b, pe) =>
        pe.io.in_b := (if (tree_reduction) in_b.zero else in_b)
        pe.io.out_b
    }
  }

  // Broadcast 'd' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_d(c)) {
      case (in_d, pe) =>
        pe.io.in_d := in_d
        pe.io.out_c
    }
  }

  // Broadcast 'control' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_control(c)) {
      case (in_ctrl, pe) =>
        pe.io.in_control := in_ctrl
        pe.io.out_control
    }
  }

  // Broadcast 'garbage' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_valid(c)) {
      case (v, pe) =>
        pe.io.in_valid := v
        pe.io.out_valid
    }
  }

  // Broadcast 'id' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_id(c)) {
      case (id, pe) =>
        pe.io.in_id := id
        pe.io.out_id
    }
  }

  // Broadcast 'last' vertically across the Tile
  for (c <- 0 until columns) {
    tileT(c).foldLeft(io.in_last(c)) {
      case (last, pe) =>
        pe.io.in_last := last
        pe.io.out_last
    }
  }

  // Drive the Tile's bottom IO
  for (c <- 0 until columns) {
    io.out_c(c) := tile(rows-1)(c).io.out_c
    io.out_control(c) := tile(rows-1)(c).io.out_control
    io.out_id(c) := tile(rows-1)(c).io.out_id
    io.out_last(c) := tile(rows-1)(c).io.out_last
    io.out_valid(c) := tile(rows-1)(c).io.out_valid

    io.out_b(c) := {
      if (tree_reduction) {
        val prods = tileT(c).map(_.io.out_b)
        accumulateTree(prods :+ io.in_b(c))
      } else {
        tile(rows - 1)(c).io.out_b
      }
    }
  }
  io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_)

  // Drive the Tile's right IO
  for (r <- 0 until rows) {
    io.out_a(r) := tile(r)(columns-1).io.out_a
  }
}
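Before the generated Tile_214 module, a small hedged illustration of the foldLeft chaining idiom used throughout Tile above: each step connects the previous link into the current module's input and returns that module's output as the accumulator for the next step. PassThrough and ChainExample are hypothetical stand-ins, not Gemmini code.

import chisel3._

// Hypothetical stand-in for one PE port pair (purely combinational, like 'a' in Tile)
class PassThrough(w: Int) extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(w.W))
    val out = Output(UInt(w.W))
  })
  io.out := io.in
}

// Chain four stages the same way Tile chains PEs along a row with foldLeft
class ChainExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val out = Output(UInt(8.W))
  })
  val stages = Seq.fill(4)(Module(new PassThrough(8)))
  io.out := stages.foldLeft(io.in) { case (prev, s) =>
    s.io.in := prev // wire the previous link into this stage
    s.io.out        // hand this stage's output to the next iteration
  }
}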
module Tile_214( // @[Tile.scala:16:7]
  input         clock, // @[Tile.scala:16:7]
  input         reset, // @[Tile.scala:16:7]
  input  [7:0]  io_in_a_0, // @[Tile.scala:17:14]
  input  [19:0] io_in_b_0, // @[Tile.scala:17:14]
  input  [19:0] io_in_d_0, // @[Tile.scala:17:14]
  input         io_in_control_0_dataflow, // @[Tile.scala:17:14]
  input         io_in_control_0_propagate, // @[Tile.scala:17:14]
  input  [4:0]  io_in_control_0_shift, // @[Tile.scala:17:14]
  input  [2:0]  io_in_id_0, // @[Tile.scala:17:14]
  input         io_in_last_0, // @[Tile.scala:17:14]
  output [7:0]  io_out_a_0, // @[Tile.scala:17:14]
  output [19:0] io_out_c_0, // @[Tile.scala:17:14]
  output [19:0] io_out_b_0, // @[Tile.scala:17:14]
  output        io_out_control_0_dataflow, // @[Tile.scala:17:14]
  output        io_out_control_0_propagate, // @[Tile.scala:17:14]
  output [4:0]  io_out_control_0_shift, // @[Tile.scala:17:14]
  output [2:0]  io_out_id_0, // @[Tile.scala:17:14]
  output        io_out_last_0, // @[Tile.scala:17:14]
  input         io_in_valid_0, // @[Tile.scala:17:14]
  output        io_out_valid_0, // @[Tile.scala:17:14]
  output        io_bad_dataflow // @[Tile.scala:17:14]
);

  wire [7:0]  io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7]
  wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7]
  wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7]
  wire        io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7]
  wire        io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7]
  wire [4:0]  io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7]
  wire [2:0]  io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7]
  wire        io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7]
  wire        io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7]
  wire [7:0]  io_out_a_0_0; // @[Tile.scala:16:7]
  wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7]
  wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7]
  wire        io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
  wire        io_out_control_0_propagate_0; // @[Tile.scala:16:7]
  wire [4:0]  io_out_control_0_shift_0; // @[Tile.scala:16:7]
  wire [2:0]  io_out_id_0_0; // @[Tile.scala:16:7]
  wire        io_out_last_0_0; // @[Tile.scala:16:7]
  wire        io_out_valid_0_0; // @[Tile.scala:16:7]
  wire        io_bad_dataflow_0; // @[Tile.scala:16:7]

  PE_470 tile_0_0 ( // @[Tile.scala:42:44]
    .clock                    (clock),
    .reset                    (reset),
    .io_in_a                  (io_in_a_0_0), // @[Tile.scala:16:7]
    .io_in_b                  (io_in_b_0_0), // @[Tile.scala:16:7]
    .io_in_d                  (io_in_d_0_0), // @[Tile.scala:16:7]
    .io_out_a                 (io_out_a_0_0),
    .io_out_b                 (io_out_b_0_0),
    .io_out_c                 (io_out_c_0_0),
    .io_in_control_dataflow   (io_in_control_0_dataflow_0), // @[Tile.scala:16:7]
    .io_in_control_propagate  (io_in_control_0_propagate_0), // @[Tile.scala:16:7]
    .io_in_control_shift      (io_in_control_0_shift_0), // @[Tile.scala:16:7]
    .io_out_control_dataflow  (io_out_control_0_dataflow_0),
    .io_out_control_propagate (io_out_control_0_propagate_0),
    .io_out_control_shift     (io_out_control_0_shift_0),
    .io_in_id                 (io_in_id_0_0), // @[Tile.scala:16:7]
    .io_out_id                (io_out_id_0_0),
    .io_in_last               (io_in_last_0_0), // @[Tile.scala:16:7]
    .io_out_last              (io_out_last_0_0),
    .io_in_valid              (io_in_valid_0_0), // @[Tile.scala:16:7]
    .io_out_valid             (io_out_valid_0_0),
    .io_bad_dataflow          (io_bad_dataflow_0)
  ); // @[Tile.scala:42:44]

  assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7]
  assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7]
  assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7]
  assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
  assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7]
  assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7]
  assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7]
  assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7]
  assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7]
  assign io_bad_dataflow = io_bad_dataflow_0; // @[Tile.scala:16:7]
endmodule
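For readers mapping the generated Verilog back to Chisel: every port of Tile_214 is the flattened form of a field in the Tile's io bundle, with one-element Vecs lowered to single `_0`-suffixed ports. The sketch below is purely illustrative and is not the actual Tile.scala; the bundle layout and names are assumptions, while the widths are read directly off the port list above.

// Hypothetical sketch of an IO bundle whose FIRRTL lowering would produce
// ports like those of Tile_214 above (widths from the Verilog; structure assumed).
import chisel3._

class PEControlSketch extends Bundle { // mirrors io_*_control_0_{dataflow,propagate,shift}
  val dataflow  = Bool()
  val propagate = Bool()
  val shift     = UInt(5.W)
}

class TileIOSketch extends Bundle {
  val in_a         = Input(Vec(1, UInt(8.W)))   // -> io_in_a_0
  val in_b         = Input(Vec(1, UInt(20.W)))  // -> io_in_b_0
  val in_d         = Input(Vec(1, UInt(20.W)))  // -> io_in_d_0
  val in_control   = Input(Vec(1, new PEControlSketch))
  val in_id        = Input(Vec(1, UInt(3.W)))   // -> io_in_id_0
  val in_last      = Input(Vec(1, Bool()))
  val in_valid     = Input(Vec(1, Bool()))
  val out_a        = Output(Vec(1, UInt(8.W)))
  val out_b        = Output(Vec(1, UInt(20.W)))
  val out_c        = Output(Vec(1, UInt(20.W)))
  val out_control  = Output(Vec(1, new PEControlSketch))
  val out_id       = Output(Vec(1, UInt(3.W)))
  val out_last     = Output(Vec(1, Bool()))
  val out_valid    = Output(Vec(1, Bool()))
  val bad_dataflow = Output(Bool())             // -> io_bad_dataflow
}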
Generate the Verilog code corresponding to the following Chisel files.
File ResetCatchAndSync.scala:
// See LICENSE.SiFive for license details.

package freechips.rocketchip.util

import chisel3._
import chisel3.{withClockAndReset, withReset}

/** Reset: asynchronous assert,
 *  synchronous de-assert
 *
 */
class ResetCatchAndSync (sync: Int = 3) extends Module {
  override def desiredName = s"ResetCatchAndSync_d${sync}"

  val io = IO(new Bundle {
    val sync_reset = Output(Bool())
    val psd = Input(new PSDTestMode())
  })

  // Bypass both the resets to the flops themselves (to prevent DFT holes on
  // those flops) and on the output of the synchronizer circuit (to control
  // reset to any flops this circuit drives).
  val post_psd_reset = Mux(io.psd.test_mode, io.psd.test_mode_reset, reset.asBool)
  withReset(post_psd_reset) {
    io.sync_reset := Mux(io.psd.test_mode, io.psd.test_mode_reset,
      ~AsyncResetSynchronizerShiftReg(true.B, sync))
  }
}

object ResetCatchAndSync {

  def apply(clk: Clock, rst: Bool, sync: Int = 3, name: Option[String] = None,
    psd: Option[PSDTestMode] = None): Bool = {
    withClockAndReset(clk, rst) {
      val catcher = Module (new ResetCatchAndSync(sync))
      if (name.isDefined) {catcher.suggestName(name.get)}
      catcher.io.psd <> psd.getOrElse(WireDefault(0.U.asTypeOf(new PSDTestMode())))
      catcher.io.sync_reset
    }
  }

  def apply(clk: Clock, rst: Bool, sync: Int, name: String): Bool =
    apply(clk, rst, sync, Some(name))
  def apply(clk: Clock, rst: Bool, name: String): Bool =
    apply(clk, rst, name = Some(name))
  def apply(clk: Clock, rst: Bool, sync: Int, name: String, psd: PSDTestMode): Bool =
    apply(clk, rst, sync, Some(name), Some(psd))
  def apply(clk: Clock, rst: Bool, name: String, psd: PSDTestMode): Bool =
    apply(clk, rst, name = Some(name), psd = Some(psd))
}

File ShiftReg.scala:
// See LICENSE.SiFive for license details.

package freechips.rocketchip.util

import chisel3._

// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
  def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
    (0 until n).foldRight(in) {
      case (i, next) => {
        val r = RegNext(next, init)
        name.foreach { na => r.suggestName(s"${na}_${i}") }
        r
      }
    }
}

/** These wrap behavioral
 *  shift registers into specific modules to allow for
 *  backend flows to replace or constrain
 *  them properly when used for CDC synchronization,
 *  rather than buffering.
 *
 *  The different types vary in their reset behavior:
 *  AsyncResetShiftReg -- Asynchronously reset register array
 *  A W(width) x D(depth) sized array is constructed from D instantiations of a
 *  W-wide register vector.
 *  Functionally identical to AsyncResetSyncrhonizerShiftReg,
 *  but only used for timing applications
 */

abstract class AbstractPipelineReg(w: Int = 1) extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(w.W))
    val q = Output(UInt(w.W))
  })
}

object AbstractPipelineReg {
  def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
    val chain = Module(gen)
    name.foreach{ chain.suggestName(_) }
    chain.io.d := in.asUInt
    chain.io.q.asTypeOf(in)
  }
}

class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe")
    extends AbstractPipelineReg(w) {
  require(depth > 0, "Depth must be greater than 0.")

  override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"

  val chain = List.tabulate(depth) { i =>
    Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
  }

  chain.last.io.d := io.d
  chain.last.io.en := true.B

  (chain.init zip chain.tail).foreach { case (sink, source) =>
    sink.io.d := source.io.q
    sink.io.en := true.B
  }
  io.q := chain.head.io.q
}

object AsyncResetShiftReg {
  def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
    AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)

  def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
    apply(in, depth, 0, name)

  def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
    apply(in, depth, init.litValue.toInt, name)

  def apply [T <: Data](in: T, depth: Int, init: T): T =
    apply (in, depth, init.litValue.toInt, None)
}
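Before the generated module below, a brief usage sketch of the two helpers defined above may be useful: ShiftRegInit builds a plain behavioral RegNext chain, while AsyncResetShiftReg wraps the chain in a dedicated module so CDC-aware backend flows can recognize and constrain it. The surrounding module, port, and suggested names here are hypothetical; only the ShiftRegInit and AsyncResetShiftReg signatures come from the file above.

// Usage sketch (assumed caller code, not part of ShiftReg.scala): delay a
// one-bit signal by three cycles two different ways.
import chisel3._
import freechips.rocketchip.util.{ShiftRegInit, AsyncResetShiftReg}

class ShiftRegUserSketch extends Module {
  val io = IO(new Bundle {
    val d            = Input(Bool())
    val q_behavioral = Output(Bool())
    val q_async      = Output(Bool())
  })

  // Behavioral chain: three RegNext stages initialized to false.B,
  // with suggested names sync_0 .. sync_2.
  io.q_behavioral := ShiftRegInit(io.d, 3, false.B, Some("sync"))

  // Module-based chain: elaborates an AsyncResetShiftReg_w1_d3_i0 instance.
  io.q_async := AsyncResetShiftReg(io.d, 3, 0, Some("pipe"))
}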
module ResetCatchAndSync_d3_2( // @[ResetCatchAndSync.scala:13:7]
  input  clock, // @[ResetCatchAndSync.scala:13:7]
  input  reset, // @[ResetCatchAndSync.scala:13:7]
  output io_sync_reset // @[ResetCatchAndSync.scala:17:14]
);

  wire _post_psd_reset_T = reset; // @[ResetCatchAndSync.scala:26:76]
  wire io_psd_test_mode = 1'h0; // @[ResetCatchAndSync.scala:13:7, :17:14]
  wire io_psd_test_mode_reset = 1'h0; // @[ResetCatchAndSync.scala:13:7, :17:14]
  wire _io_sync_reset_T_1; // @[ResetCatchAndSync.scala:28:25]
  wire io_sync_reset_0; // @[ResetCatchAndSync.scala:13:7]
  wire post_psd_reset = _post_psd_reset_T; // @[ResetCatchAndSync.scala:26:{27,76}]
  wire _io_sync_reset_WIRE; // @[ShiftReg.scala:48:24]
  wire _io_sync_reset_T = ~_io_sync_reset_WIRE; // @[ShiftReg.scala:48:24]
  assign _io_sync_reset_T_1 = _io_sync_reset_T; // @[ResetCatchAndSync.scala:28:25, :29:7]
  assign io_sync_reset_0 = _io_sync_reset_T_1; // @[ResetCatchAndSync.scala:13:7, :28:25]

  AsyncResetSynchronizerShiftReg_w1_d3_i0_116 io_sync_reset_chain ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (post_psd_reset), // @[ResetCatchAndSync.scala:26:27]
    .io_q  (_io_sync_reset_WIRE)
  ); // @[ShiftReg.scala:45:23]

  assign io_sync_reset = io_sync_reset_0; // @[ResetCatchAndSync.scala:13:7]
endmodule
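A short, hedged sketch of how ResetCatchAndSync.apply from the Scala source above is typically invoked to produce an instance like ResetCatchAndSync_d3_2. The caller module and its port names are assumptions; what is grounded in the sources above is the apply signature and the fact that leaving psd as None ties the PSD test-mode inputs to zero, which is consistent with io_psd_test_mode appearing as a constant wire rather than a port in the generated Verilog.

// Usage sketch (assumed caller, e.g. clock-domain wiring code): derive a
// synchronously de-asserted reset from a raw asynchronous reset.
import chisel3._
import freechips.rocketchip.util.ResetCatchAndSync

class ResetSyncUserSketch extends RawModule {
  val clock      = IO(Input(Clock()))
  val raw_reset  = IO(Input(Bool()))
  val sync_reset = IO(Output(Bool()))

  // sync = 3 matches the _d3 suffix of the generated module name;
  // psd is left at its default (None), so the PSD bundle is tied off to zero.
  sync_reset := ResetCatchAndSync(clock, raw_reset, 3, Some("reset_catcher"))
}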
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, 
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask gives the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
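// Worked example (added for illustration; the values below are hypothetical, not part of the original file),
// spelling out contains() and alignment for the base=0x1000, mask=0xf0f case above:
//   val s = AddressSet(0x1000, 0xf0f)
//   s.contains(0x1103) // true:  (0x1103 ^ 0x1000) & ~0xf0f == 0x103 & ~0xf0f == 0
//   s.contains(0x1010) // false: (0x1010 ^ 0x1000) & ~0xf0f == 0x010, and bit 4 is not masked
//   s.alignment        // 0x10:  ((mask + 1) & ~mask), i.e. each fragment is a 16-byte granule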
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
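Aside, before the generated module below: a minimal, self-contained Chisel sketch (illustrative only; the BeatTracker name and its ports are hypothetical and not part of the files above) of the beat-counting pattern that firstlastHelper implements, where beats1 is the burst length in beats minus one and fire marks an accepted beat on the channel:

import chisel3._
import chisel3.util._

class BeatTracker(maxBeats: Int) extends Module {
  val io = IO(new Bundle {
    val fire   = Input(Bool())                    // a beat was accepted this cycle
    val beats1 = Input(UInt(log2Up(maxBeats).W))  // beats-1 of the current burst
    val first  = Output(Bool())
    val last   = Output(Bool())
    val done   = Output(Bool())
  })
  val counter  = RegInit(0.U(log2Up(maxBeats).W))
  val counter1 = counter - 1.U
  io.first := counter === 0.U                       // no burst in progress yet
  io.last  := counter === 1.U || io.beats1 === 0.U  // final (or only) beat
  io.done  := io.last && io.fire
  // reload from beats1 on the first accepted beat, then count down to zero
  when (io.fire) { counter := Mux(io.first, io.beats1, counter1) }
}

As in firstlastHelper, a single-beat message (beats1 === 0) is both first and last on the same cycle.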
module TLMonitor_58( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_c_ready, // @[Monitor.scala:20:14] input io_in_c_valid, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_size, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_c_bits_address, // @[Monitor.scala:20:14] input io_in_c_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt, // @[Monitor.scala:20:14] input io_in_e_valid, // @[Monitor.scala:20:14] input [2:0] io_in_e_bits_sink // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71] wire [12:0] _GEN_0 = {10'h0, io_in_c_bits_size}; // @[package.scala:243:71] wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg [2:0] a_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [2:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] wire _d_first_T_3 = io_in_d_ready & io_in_d_valid; // @[Decoupled.scala:51:35] reg [2:0] d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [2:0] source_1; // @[Monitor.scala:541:22] reg [2:0] sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] wire _c_first_T_1 = io_in_c_ready & io_in_c_valid; // @[Decoupled.scala:51:35] reg [2:0] c_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_3; // @[Monitor.scala:515:22] reg [2:0] param_3; // @[Monitor.scala:516:22] reg [2:0] size_3; // @[Monitor.scala:517:22] reg [2:0] source_3; // @[Monitor.scala:518:22] reg [31:0] address_2; // @[Monitor.scala:519:22] reg [4:0] inflight; // @[Monitor.scala:614:27] reg [19:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [19:0] inflight_sizes; // @[Monitor.scala:618:33] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire [7:0] _GEN_1 = {5'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35] wire _GEN_2 
= _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35] wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46] wire _GEN_3 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74] wire [7:0] _GEN_4 = {5'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [4:0] inflight_1; // @[Monitor.scala:726:35] reg [19:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg [2:0] c_first_counter_1; // @[Edges.scala:229:27] wire c_first_1 = c_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _GEN_5 = io_in_c_bits_opcode[2] & io_in_c_bits_opcode[1]; // @[Edges.scala:68:{36,40,51}] wire [7:0] _GEN_6 = {5'h0, io_in_c_bits_source}; // @[OneHot.scala:58:35] wire _GEN_7 = _c_first_T_1 & c_first_1 & _GEN_5; // @[Decoupled.scala:51:35] reg [31:0] watchdog_1; // @[Monitor.scala:818:27] reg [7:0] inflight_2; // @[Monitor.scala:828:27] reg [2:0] d_first_counter_3; // @[Edges.scala:229:27] wire d_first_3 = d_first_counter_3 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _GEN_8 = _d_first_T_3 & d_first_3 & io_in_d_bits_opcode[2] & ~(io_in_d_bits_opcode[1]); // @[Decoupled.scala:51:35] wire [7:0] _GEN_9 = {5'h0, io_in_d_bits_sink}; // @[OneHot.scala:58:35] wire [7:0] d_set = _GEN_8 ? 8'h1 << _GEN_9 : 8'h0; // @[OneHot.scala:58:35] wire [7:0] _GEN_10 = {5'h0, io_in_e_bits_sink}; // @[OneHot.scala:58:35]
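For reference, the inflight_2 and d_set wires in the module above follow a common one-hot set/clear bookkeeping pattern for outstanding sink IDs. A minimal Chisel sketch of that pattern (the SinkTracker name and ports are hypothetical, not taken from the generated monitor):

import chisel3._
import chisel3.util._

class SinkTracker(numSinks: Int) extends Module {
  val io = IO(new Bundle {
    val grantFire = Input(Bool())                     // first beat of an accepted Grant on D
    val grantSink = Input(UInt(log2Up(numSinks).W))
    val ackFire   = Input(Bool())                     // accepted GrantAck on E
    val ackSink   = Input(UInt(log2Up(numSinks).W))
    val busy      = Output(UInt(numSinks.W))          // one bit per outstanding sink ID
  })
  val inflight = RegInit(0.U(numSinks.W))
  val set      = Mux(io.grantFire, UIntToOH(io.grantSink, numSinks), 0.U)
  val clear    = Mux(io.ackFire,   UIntToOH(io.ackSink,   numSinks), 0.U)
  inflight := (inflight | set) & ~clear               // set on Grant, clear on matching GrantAck
  io.busy  := inflight
}

A monitor built around this state would typically assert that a Grant never reuses a sink ID that is still busy and that every GrantAck targets a busy sink.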
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Nodes.scala: // See LICENSE.SiFive for license details. 
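// Aside (not part of the original Nodes.scala): a hedged usage sketch of a few of the
// freechips.rocketchip.util helpers defined in package.scala above; the vals `idx`, `data`,
// `valid`, `hits` and `size` are hypothetical hardware signals.
//   idx.isOneOf(1.U, 3.U, 5.U)   // UIntIsOneOf: Bool, true when idx equals any listed value
//   data.holdUnless(valid)       // DataToAugmentedData: pass data while valid, else hold the last valid value
//   leftOR(hits)                 // fill 1s from each set bit of `hits` toward the high end
//   UIntToOH1(size, 8)           // 8-bit mask with the low `size` bits set (trailing-ones code)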
package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, 
TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case 
class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] 
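// Aside (hypothetical example, not part of the original Nodes.scala): a minimal sketch of how
// the node types above are used from a LazyModule. A pass-through adapter leaves the client and
// manager parameters untouched and simply forwards the A and D channels; B/C/E are omitted, so
// this only makes sense for a TL-UL style edge. LazyModule and LazyModuleImp come from
// org.chipsalliance.diplomacy.lazymodule (shown in LazyModuleImp.scala below).
class TLPassthroughExample(implicit p: Parameters) extends LazyModule {
  val node = TLAdapterNode()
  lazy val module = new LazyModuleImp(this) {
    (node.in zip node.out) foreach { case ((in, _), (out, _)) =>
      out.a <> in.a
      in.d <> out.d
    }
  }
}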
// discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. 
* Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode 
= UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new 
TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
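// Each child below is handled in one of two ways: a child whose `cloneProto` is set is stamped
// out with CloneModuleAsRecord and its cloned `auto` ports are patched into fresh Dangles, while
// an ordinary child is simply instantiated with Module(c.module) and contributes its own dangles.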
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate 
these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
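// A small worked sketch of these semantics (hypothetical values, not from the original file): an
// address belongs to the set when every bit outside `mask` matches the corresponding bit of `base`.
//   AddressSet(0x1000, 0xf0f).contains(BigInt(0x1103)) // true: 0x1103 ^ 0x1000 = 0x103, which lies entirely inside the mask
//   AddressSet(0x1000, 0xf0f).contains(BigInt(0x1010)) // false: bit 4 differs and is outside the mask
//   AddressSet(0x1000, 0xf0f).alignment                // 0x10: each covered fragment is 16 bytes wide
//   AddressSet(0x1000, 0xf0f).contiguous               // false: the set is 16 strided 16-byte fragments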
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. 
* * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. 
For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. 
*/ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. 
*/ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]]s indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } }
/** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource }
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc. operators. */ private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star" * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N...
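// (Illustrative sketch only: suppose N has the hypothetical bindings `foo :=* N`, `baz := N` and
//  `N :*= bar`. Then oStars = 1 and iStars = 1 (one star on each side), oKnown = 1 (from `baz := N`)
//  and iKnown = 0; resolveStar then decides how many edges each star stands for.)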
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
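// (Illustrative sketch: if binding `i` of this node maps to the half-open range (2, 4) of
//  `n.oPortMapping`, the code below yields the two inward ports with j = 2 and j = 3.)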
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } }
File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there exist C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there exist D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x 
match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! 
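// A minimal usage sketch (illustrative only; `maxOutstanding` is a hypothetical design parameter):
//   val (flight, nextFlight) = edge.inFlight(tlBundle)
//   assert(nextFlight <= maxOutstanding.U, "too many outstanding TileLink transactions")
// where `edge` is this TLEdge and `tlBundle` is the monitored TLBundle.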
def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare 
c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, 
mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := 
DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: 
Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, 
data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } } File Arbiter.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ object TLArbiter { // (valids, select) => readys type Policy = (Integer, UInt, Bool) => UInt val lowestIndexFirst: Policy = (width, valids, select) => ~(leftOR(valids) << 1)(width-1, 0) val highestIndexFirst: Policy = (width, valids, select) => ~((rightOR(valids) >> 1).pad(width)) val roundRobin: Policy = (width, valids, select) => if (width == 1) 1.U(1.W) else { val valid = valids(width-1, 0) assert (valid === valids) val mask = RegInit(((BigInt(1) << width)-1).U(width-1,0)) val filter = Cat(valid & ~mask, valid) val unready = (rightOR(filter, width*2, width) >> 1) | (mask << width) val readys = ~((unready >> width) & unready(width-1, 0)) when (select && valid.orR) { mask := leftOR(readys & valid, width) } readys(width-1, 0) } def lowestFromSeq[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: Seq[DecoupledIO[T]]): Unit = { apply(lowestIndexFirst)(sink, sources.map(s => (edge.numBeats1(s.bits), s)):_*) } def lowest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(lowestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def highest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(highestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def robin[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(roundRobin)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def apply[T <: Data](policy: Policy)(sink: DecoupledIO[T], sources: (UInt, DecoupledIO[T])*): Unit = { if (sources.isEmpty) { sink.bits := DontCare } else if (sources.size == 1) { sink :<>= sources.head._2 } else { val pairs = sources.toList val beatsIn = pairs.map(_._1) val sourcesIn = pairs.map(_._2) // The number of beats which remain to be sent val beatsLeft = RegInit(0.U) val idle = beatsLeft === 0.U val latch = idle && sink.ready // winner (if any) claims sink // Who wants access to the sink? val valids = sourcesIn.map(_.valid) // Arbitrate amongst the requests val readys = VecInit(policy(valids.size, Cat(valids.reverse), latch).asBools) // Which request wins arbitration? 
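// (Worked example, for illustration: with four sources and valids = b0110, lowestIndexFirst yields
//  readys = b0011, so readys & valids = b0010 and source 1 (the lowest-numbered valid source) is
//  granted; highestIndexFirst would instead yield readys = b1100 and grant source 2.)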
val winner = VecInit((readys zip valids) map { case (r,v) => r&&v }) // Confirm the policy works properly require (readys.size == valids.size) // Never two winners val prefixOR = winner.scanLeft(false.B)(_||_).init assert((prefixOR zip winner) map { case (p,w) => !p || !w } reduce {_ && _}) // If there was any request, there is a winner assert (!valids.reduce(_||_) || winner.reduce(_||_)) // Track remaining beats val maskedBeats = (winner zip beatsIn) map { case (w,b) => Mux(w, b, 0.U) } val initBeats = maskedBeats.reduce(_ | _) // no winner => 0 beats beatsLeft := Mux(latch, initBeats, beatsLeft - sink.fire) // The one-hot source granted access in the previous cycle val state = RegInit(VecInit(Seq.fill(sources.size)(false.B))) val muxState = Mux(idle, winner, state) state := muxState val allowed = Mux(idle, readys, state) (sourcesIn zip allowed) foreach { case (s, r) => s.ready := sink.ready && r } sink.valid := Mux(idle, valids.reduce(_||_), Mux1H(state, valids)) sink.bits :<= Mux1H(muxState, sourcesIn.map(_.bits)) } } } // Synthesizable unit tests import freechips.rocketchip.unittest._ abstract class DecoupledArbiterTest( policy: TLArbiter.Policy, txns: Int, timeout: Int, val numSources: Int, beatsLeftFromIdx: Int => UInt) (implicit p: Parameters) extends UnitTest(timeout) { val sources = Wire(Vec(numSources, DecoupledIO(UInt(log2Ceil(numSources).W)))) dontTouch(sources.suggestName("sources")) val sink = Wire(DecoupledIO(UInt(log2Ceil(numSources).W))) dontTouch(sink.suggestName("sink")) val count = RegInit(0.U(log2Ceil(txns).W)) val lfsr = LFSR(16, true.B) sources.zipWithIndex.map { case (z, i) => z.bits := i.U } TLArbiter(policy)(sink, sources.zipWithIndex.map { case (z, i) => (beatsLeftFromIdx(i), z) }:_*) count := count + 1.U io.finished := count >= txns.U } /** This tests that when a specific pattern of source valids are driven, * a new index from amongst that pattern is always selected, * unless one of those sources takes multiple beats, * in which case the same index should be selected until the arbiter goes idle. */ class TLDecoupledArbiterRobinTest(txns: Int = 128, timeout: Int = 500000, print: Boolean = false) (implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.roundRobin, txns, timeout, 6, i => i.U) { val lastWinner = RegInit((numSources+1).U) val beatsLeft = RegInit(0.U(log2Ceil(numSources).W)) val first = lastWinner > numSources.U val valid = lfsr(0) val ready = lfsr(15) sink.ready := ready sources.zipWithIndex.map { // pattern: every even-indexed valid is driven the same random way case (s, i) => s.valid := (if (i % 2 == 1) false.B else valid) } when (sink.fire) { if (print) { printf("TestRobin: %d\n", sink.bits) } when (beatsLeft === 0.U) { assert(lastWinner =/= sink.bits, "Round robin did not pick a new idx despite one being valid.") lastWinner := sink.bits beatsLeft := sink.bits } .otherwise { assert(lastWinner === sink.bits, "Round robin did not pick the same index over multiple beats") beatsLeft := beatsLeft - 1.U } } if (print) { when (!sink.fire) { printf("TestRobin: idle (%d %d)\n", valid, ready) } } } /** This tests that the lowest index is always selected across random single cycle transactions. 
*/ class TLDecoupledArbiterLowestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.lowestIndexFirst, txns, timeout, 15, _ => 0.U) { def assertLowest(id: Int): Unit = { when (sources(id).valid) { assert((numSources-1 until id by -1).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a higher valid source was granted ready.") } } sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) } sink.ready := lfsr(15) when (sink.fire) { (0 until numSources).foreach(assertLowest(_)) } } /** This tests that the highest index is always selected across random single cycle transactions. */ class TLDecoupledArbiterHighestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.highestIndexFirst, txns, timeout, 15, _ => 0.U) { def assertHighest(id: Int): Unit = { when (sources(id).valid) { assert((0 until id).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a lower valid source was granted ready.") } } sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) } sink.ready := lfsr(15) when (sink.fire) { (0 until numSources).foreach(assertHighest(_)) } } File Xbar.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.{AddressDecoder, AddressSet, RegionType, IdRange, TriStateValue} import freechips.rocketchip.util.BundleField // Trades off slave port proximity against routing resource cost object ForceFanout { def apply[T]( a: TriStateValue = TriStateValue.unset, b: TriStateValue = TriStateValue.unset, c: TriStateValue = TriStateValue.unset, d: TriStateValue = TriStateValue.unset, e: TriStateValue = TriStateValue.unset)(body: Parameters => T)(implicit p: Parameters) = { body(p.alterPartial { case ForceFanoutKey => p(ForceFanoutKey) match { case ForceFanoutParams(pa, pb, pc, pd, pe) => ForceFanoutParams(a.update(pa), b.update(pb), c.update(pc), d.update(pd), e.update(pe)) } }) } } private case class ForceFanoutParams(a: Boolean, b: Boolean, c: Boolean, d: Boolean, e: Boolean) private case object ForceFanoutKey extends Field(ForceFanoutParams(false, false, false, false, false)) class TLXbar(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule { val node = new TLNexusNode( clientFn = { seq => seq(0).v1copy( echoFields = BundleField.union(seq.flatMap(_.echoFields)), requestFields = BundleField.union(seq.flatMap(_.requestFields)), responseKeys = seq.flatMap(_.responseKeys).distinct, minLatency = seq.map(_.minLatency).min, clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) => port.clients map { client => client.v1copy( sourceId = client.sourceId.shift(range.start) )} } ) }, managerFn = { seq => val fifoIdFactory = TLXbar.relabeler() seq(0).v1copy( responseFields = BundleField.union(seq.flatMap(_.responseFields)), requestKeys = seq.flatMap(_.requestKeys).distinct, minLatency = seq.map(_.minLatency).min, endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max, managers = seq.flatMap { port => require (port.beatBytes == seq(0).beatBytes, s"Xbar ($name with parent $parent) data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B") val fifoIdMapper = fifoIdFactory() port.managers map { manager => manager.v1copy( 
fifoId = manager.fifoId.map(fifoIdMapper(_)) )} } ) } ){ override def circuitIdentity = outputs.size == 1 && inputs.size == 1 } lazy val module = new Impl class Impl extends LazyModuleImp(this) { if ((node.in.size * node.out.size) > (8*32)) { println (s"!!! WARNING !!!") println (s" Your TLXbar ($name with parent $parent) is very large, with ${node.in.size} Masters and ${node.out.size} Slaves.") println (s"!!! WARNING !!!") } val wide_bundle = TLBundleParameters.union((node.in ++ node.out).map(_._2.bundle)) override def desiredName = (Seq("TLXbar") ++ nameSuffix ++ Seq(s"i${node.in.size}_o${node.out.size}_${wide_bundle.shortName}")).mkString("_") TLXbar.circuit(policy, node.in, node.out) } } object TLXbar { def mapInputIds(ports: Seq[TLMasterPortParameters]) = assignRanges(ports.map(_.endSourceId)) def mapOutputIds(ports: Seq[TLSlavePortParameters]) = assignRanges(ports.map(_.endSinkId)) def assignRanges(sizes: Seq[Int]) = { val pow2Sizes = sizes.map { z => if (z == 0) 0 else 1 << log2Ceil(z) } val tuples = pow2Sizes.zipWithIndex.sortBy(_._1) // record old index, then sort by increasing size val starts = tuples.scanRight(0)(_._1 + _).tail // suffix-sum of the sizes = the start positions val ranges = (tuples zip starts) map { case ((sz, i), st) => (if (sz == 0) IdRange(0, 0) else IdRange(st, st + sz), i) } ranges.sortBy(_._2).map(_._1) // Restore orignal order } def relabeler() = { var idFactory = 0 () => { val fifoMap = scala.collection.mutable.HashMap.empty[Int, Int] (x: Int) => { if (fifoMap.contains(x)) fifoMap(x) else { val out = idFactory idFactory = idFactory + 1 fifoMap += (x -> out) out } } } } def circuit(policy: TLArbiter.Policy, seqIn: Seq[(TLBundle, TLEdge)], seqOut: Seq[(TLBundle, TLEdge)]) { val (io_in, edgesIn) = seqIn.unzip val (io_out, edgesOut) = seqOut.unzip // Not every master need connect to every slave on every channel; determine which connections are necessary val reachableIO = edgesIn.map { cp => edgesOut.map { mp => cp.client.clients.exists { c => mp.manager.managers.exists { m => c.visibility.exists { ca => m.address.exists { ma => ca.overlaps(ma)}}}} }.toVector}.toVector val probeIO = (edgesIn zip reachableIO).map { case (cp, reachableO) => (edgesOut zip reachableO).map { case (mp, reachable) => reachable && cp.client.anySupportProbe && mp.manager.managers.exists(_.regionType >= RegionType.TRACKED) }.toVector}.toVector val releaseIO = (edgesIn zip reachableIO).map { case (cp, reachableO) => (edgesOut zip reachableO).map { case (mp, reachable) => reachable && cp.client.anySupportProbe && mp.manager.anySupportAcquireB }.toVector}.toVector val connectAIO = reachableIO val connectBIO = probeIO val connectCIO = releaseIO val connectDIO = reachableIO val connectEIO = releaseIO def transpose[T](x: Seq[Seq[T]]) = if (x.isEmpty) Nil else Vector.tabulate(x(0).size) { i => Vector.tabulate(x.size) { j => x(j)(i) } } val connectAOI = transpose(connectAIO) val connectBOI = transpose(connectBIO) val connectCOI = transpose(connectCIO) val connectDOI = transpose(connectDIO) val connectEOI = transpose(connectEIO) // Grab the port ID mapping val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client)) val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager)) // We need an intermediate size of bundle with the widest possible identifiers val wide_bundle = TLBundleParameters.union(io_in.map(_.params) ++ io_out.map(_.params)) // Handle size = 1 gracefully (Chisel3 empty range is broken) def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0) // 
Transform input bundle sources (sinks use global namespace on both sides) val in = Wire(Vec(io_in.size, TLBundle(wide_bundle))) for (i <- 0 until in.size) { val r = inputIdRanges(i) if (connectAIO(i).exists(x=>x)) { in(i).a.bits.user := DontCare in(i).a.squeezeAll.waiveAll :<>= io_in(i).a.squeezeAll.waiveAll in(i).a.bits.source := io_in(i).a.bits.source | r.start.U } else { in(i).a := DontCare io_in(i).a := DontCare in(i).a.valid := false.B io_in(i).a.ready := true.B } if (connectBIO(i).exists(x=>x)) { io_in(i).b.squeezeAll :<>= in(i).b.squeezeAll io_in(i).b.bits.source := trim(in(i).b.bits.source, r.size) } else { in(i).b := DontCare io_in(i).b := DontCare in(i).b.ready := true.B io_in(i).b.valid := false.B } if (connectCIO(i).exists(x=>x)) { in(i).c.bits.user := DontCare in(i).c.squeezeAll.waiveAll :<>= io_in(i).c.squeezeAll.waiveAll in(i).c.bits.source := io_in(i).c.bits.source | r.start.U } else { in(i).c := DontCare io_in(i).c := DontCare in(i).c.valid := false.B io_in(i).c.ready := true.B } if (connectDIO(i).exists(x=>x)) { io_in(i).d.squeezeAll.waiveAll :<>= in(i).d.squeezeAll.waiveAll io_in(i).d.bits.source := trim(in(i).d.bits.source, r.size) } else { in(i).d := DontCare io_in(i).d := DontCare in(i).d.ready := true.B io_in(i).d.valid := false.B } if (connectEIO(i).exists(x=>x)) { in(i).e.squeezeAll :<>= io_in(i).e.squeezeAll } else { in(i).e := DontCare io_in(i).e := DontCare in(i).e.valid := false.B io_in(i).e.ready := true.B } } // Transform output bundle sinks (sources use global namespace on both sides) val out = Wire(Vec(io_out.size, TLBundle(wide_bundle))) for (o <- 0 until out.size) { val r = outputIdRanges(o) if (connectAOI(o).exists(x=>x)) { out(o).a.bits.user := DontCare io_out(o).a.squeezeAll.waiveAll :<>= out(o).a.squeezeAll.waiveAll } else { out(o).a := DontCare io_out(o).a := DontCare out(o).a.ready := true.B io_out(o).a.valid := false.B } if (connectBOI(o).exists(x=>x)) { out(o).b.squeezeAll :<>= io_out(o).b.squeezeAll } else { out(o).b := DontCare io_out(o).b := DontCare out(o).b.valid := false.B io_out(o).b.ready := true.B } if (connectCOI(o).exists(x=>x)) { out(o).c.bits.user := DontCare io_out(o).c.squeezeAll.waiveAll :<>= out(o).c.squeezeAll.waiveAll } else { out(o).c := DontCare io_out(o).c := DontCare out(o).c.ready := true.B io_out(o).c.valid := false.B } if (connectDOI(o).exists(x=>x)) { out(o).d.squeezeAll :<>= io_out(o).d.squeezeAll out(o).d.bits.sink := io_out(o).d.bits.sink | r.start.U } else { out(o).d := DontCare io_out(o).d := DontCare out(o).d.valid := false.B io_out(o).d.ready := true.B } if (connectEOI(o).exists(x=>x)) { io_out(o).e.squeezeAll :<>= out(o).e.squeezeAll io_out(o).e.bits.sink := trim(out(o).e.bits.sink, r.size) } else { out(o).e := DontCare io_out(o).e := DontCare out(o).e.ready := true.B io_out(o).e.valid := false.B } } // Filter a list to only those elements selected def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1) // Based on input=>output connectivity, create per-input minimal address decode circuits val requiredAC = (connectAIO ++ connectCIO).distinct val outputPortFns: Map[Vector[Boolean], Seq[UInt => Bool]] = requiredAC.map { connectO => val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address)) val routingMask = AddressDecoder(filter(port_addrs, connectO)) val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct)) // Print the address mapping if (false) { println("Xbar mapping:") route_addrs.foreach { p => print(" ") p.foreach { a => 
print(s" ${a}") } println("") } println("--") } (connectO, route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_ || _))) }.toMap // Print the ID mapping if (false) { println(s"XBar mapping:") (edgesIn zip inputIdRanges).zipWithIndex.foreach { case ((edge, id), i) => println(s"\t$i assigned ${id} for ${edge.client.clients.map(_.name).mkString(", ")}") } println("") } val addressA = (in zip edgesIn) map { case (i, e) => e.address(i.a.bits) } val addressC = (in zip edgesIn) map { case (i, e) => e.address(i.c.bits) } def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B val requestAIO = (connectAIO zip addressA) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } } val requestCIO = (connectCIO zip addressC) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } } val requestBOI = out.map { o => inputIdRanges.map { i => i.contains(o.b.bits.source) } } val requestDOI = out.map { o => inputIdRanges.map { i => i.contains(o.d.bits.source) } } val requestEIO = in.map { i => outputIdRanges.map { o => o.contains(i.e.bits.sink) } } val beatsAI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.a.bits) } val beatsBO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.b.bits) } val beatsCI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.c.bits) } val beatsDO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.d.bits) } val beatsEI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.e.bits) } // Fanout the input sources to the output sinks val portsAOI = transpose((in zip requestAIO) map { case (i, r) => TLXbar.fanout(i.a, r, edgesOut.map(_.params(ForceFanoutKey).a)) }) val portsBIO = transpose((out zip requestBOI) map { case (o, r) => TLXbar.fanout(o.b, r, edgesIn .map(_.params(ForceFanoutKey).b)) }) val portsCOI = transpose((in zip requestCIO) map { case (i, r) => TLXbar.fanout(i.c, r, edgesOut.map(_.params(ForceFanoutKey).c)) }) val portsDIO = transpose((out zip requestDOI) map { case (o, r) => TLXbar.fanout(o.d, r, edgesIn .map(_.params(ForceFanoutKey).d)) }) val portsEOI = transpose((in zip requestEIO) map { case (i, r) => TLXbar.fanout(i.e, r, edgesOut.map(_.params(ForceFanoutKey).e)) }) // Arbitrate amongst the sources for (o <- 0 until out.size) { TLArbiter(policy)(out(o).a, filter(beatsAI zip portsAOI(o), connectAOI(o)):_*) TLArbiter(policy)(out(o).c, filter(beatsCI zip portsCOI(o), connectCOI(o)):_*) TLArbiter(policy)(out(o).e, filter(beatsEI zip portsEOI(o), connectEOI(o)):_*) filter(portsAOI(o), connectAOI(o).map(!_)) foreach { r => r.ready := false.B } filter(portsCOI(o), connectCOI(o).map(!_)) foreach { r => r.ready := false.B } filter(portsEOI(o), connectEOI(o).map(!_)) foreach { r => r.ready := false.B } } for (i <- 0 until in.size) { TLArbiter(policy)(in(i).b, filter(beatsBO zip portsBIO(i), connectBIO(i)):_*) TLArbiter(policy)(in(i).d, filter(beatsDO zip portsDIO(i), connectDIO(i)):_*) filter(portsBIO(i), connectBIO(i).map(!_)) foreach { r => r.ready := false.B } filter(portsDIO(i), connectDIO(i).map(!_)) foreach { r => r.ready := false.B } } } def apply(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode = { val xbar = LazyModule(new TLXbar(policy, nameSuffix)) xbar.node } // Replicate an input port to each output port def fanout[T <: TLChannel](input: DecoupledIO[T], select: Seq[Bool], force: Seq[Boolean] = Nil): Seq[DecoupledIO[T]] = { val filtered = Wire(Vec(select.size, chiselTypeOf(input))) for (i <- 0 until select.size) { 
  // Replicate an input port to each output port
  def fanout[T <: TLChannel](input: DecoupledIO[T], select: Seq[Bool], force: Seq[Boolean] = Nil): Seq[DecoupledIO[T]] = {
    val filtered = Wire(Vec(select.size, chiselTypeOf(input)))
    for (i <- 0 until select.size) {
      filtered(i).bits := (if (force.lift(i).getOrElse(false)) IdentityModule(input.bits) else input.bits)
      filtered(i).valid := input.valid && (select(i) || (select.size == 1).B)
    }
    input.ready := Mux1H(select, filtered.map(_.ready))
    filtered
  }
}

// Synthesizable unit tests
import freechips.rocketchip.unittest._

class TLRAMXbar(nManagers: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
  val fuzz = LazyModule(new TLFuzzer(txns))
  val model = LazyModule(new TLRAMModel("Xbar"))
  val xbar = LazyModule(new TLXbar)

  xbar.node := TLDelayer(0.1) := model.node := fuzz.node
  (0 until nManagers) foreach { n =>
    val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
    ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
  }

  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) with UnitTestModule {
    io.finished := fuzz.module.io.finished
  }
}

class TLRAMXbarTest(nManagers: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
  val dut = Module(LazyModule(new TLRAMXbar(nManagers,txns)).module)
  dut.io.start := io.start
  io.finished := dut.io.finished
}

class TLMulticlientXbar(nManagers: Int, nClients: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
  val xbar = LazyModule(new TLXbar)

  val fuzzers = (0 until nClients) map { n =>
    val fuzz = LazyModule(new TLFuzzer(txns))
    xbar.node := TLDelayer(0.1) := fuzz.node
    fuzz
  }

  (0 until nManagers) foreach { n =>
    val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
    ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
  }

  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) with UnitTestModule {
    io.finished := fuzzers.last.module.io.finished
  }
}

class TLMulticlientXbarTest(nManagers: Int, nClients: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
  val dut = Module(LazyModule(new TLMulticlientXbar(nManagers, nClients, txns)).module)
  dut.io.start := io.start
  io.finished := dut.io.finished
}
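// Note (added for orientation; not part of the original sources): the Verilog below is one
// elaborated instance of this crossbar. Its desiredName, TLXbar_cbus_out_i1_o8_a32d64s7k1z4u,
// reflects the "cbus_out" name suffix, 1 input edge and 8 output edges, and the widened
// bundle parameters visible in the port list (32 address bits, 64 data bits, 7 source bits,
// 1 sink bit, 4 size bits).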
module TLXbar_cbus_out_i1_o8_a32d64s7k1z4u( // @[Xbar.scala:74:9] input clock, // @[Xbar.scala:74:9] input reset, // @[Xbar.scala:74:9] output auto_anon_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_anon_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [6:0] auto_anon_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_anon_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_anon_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_anon_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_anon_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_anon_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [6:0] auto_anon_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output auto_anon_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_anon_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_anon_out_7_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_7_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_7_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_7_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_7_a_bits_size, // @[LazyModuleImp.scala:107:25] output [6:0] auto_anon_out_7_a_bits_source, // @[LazyModuleImp.scala:107:25] output [20:0] auto_anon_out_7_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_7_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_7_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_7_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_7_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_7_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_7_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_anon_out_7_d_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_7_d_bits_size, // @[LazyModuleImp.scala:107:25] input [6:0] auto_anon_out_7_d_bits_source, // @[LazyModuleImp.scala:107:25] input auto_anon_out_7_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_anon_out_7_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_7_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_7_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_anon_out_6_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_6_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_6_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_6_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_6_a_bits_size, // @[LazyModuleImp.scala:107:25] output [6:0] auto_anon_out_6_a_bits_source, // @[LazyModuleImp.scala:107:25] output [16:0] auto_anon_out_6_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_6_a_bits_mask, // 
@[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_6_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_6_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_6_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_6_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_6_d_bits_size, // @[LazyModuleImp.scala:107:25] input [6:0] auto_anon_out_6_d_bits_source, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_6_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_5_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_5_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_5_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_5_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_5_a_bits_size, // @[LazyModuleImp.scala:107:25] output [6:0] auto_anon_out_5_a_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_anon_out_5_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_5_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_5_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_5_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_5_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_5_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_5_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_anon_out_5_d_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_5_d_bits_size, // @[LazyModuleImp.scala:107:25] input [6:0] auto_anon_out_5_d_bits_source, // @[LazyModuleImp.scala:107:25] input auto_anon_out_5_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_anon_out_5_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_5_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_5_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_anon_out_4_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_4_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_4_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_4_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_4_a_bits_size, // @[LazyModuleImp.scala:107:25] output [6:0] auto_anon_out_4_a_bits_source, // @[LazyModuleImp.scala:107:25] output [11:0] auto_anon_out_4_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_4_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_4_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_4_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_4_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_4_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_4_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_4_d_bits_size, // @[LazyModuleImp.scala:107:25] input [6:0] auto_anon_out_4_d_bits_source, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_4_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_3_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_3_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_3_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_3_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_3_a_bits_size, // @[LazyModuleImp.scala:107:25] output [6:0] auto_anon_out_3_a_bits_source, // 
@[LazyModuleImp.scala:107:25] output [27:0] auto_anon_out_3_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_3_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_3_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_3_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_3_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_3_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_3_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_3_d_bits_size, // @[LazyModuleImp.scala:107:25] input [6:0] auto_anon_out_3_d_bits_source, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_3_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_2_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_2_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_2_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_2_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_2_a_bits_size, // @[LazyModuleImp.scala:107:25] output [6:0] auto_anon_out_2_a_bits_source, // @[LazyModuleImp.scala:107:25] output [25:0] auto_anon_out_2_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_2_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_2_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_2_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_2_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_2_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_2_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_2_d_bits_size, // @[LazyModuleImp.scala:107:25] input [6:0] auto_anon_out_2_d_bits_source, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_2_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_1_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_1_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_1_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_1_a_bits_size, // @[LazyModuleImp.scala:107:25] output [6:0] auto_anon_out_1_a_bits_source, // @[LazyModuleImp.scala:107:25] output [28:0] auto_anon_out_1_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_1_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_1_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_1_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_1_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_anon_out_1_d_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_1_d_bits_size, // @[LazyModuleImp.scala:107:25] input [6:0] auto_anon_out_1_d_bits_source, // @[LazyModuleImp.scala:107:25] input auto_anon_out_1_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_anon_out_1_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_1_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_1_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_anon_out_0_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_0_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_0_a_bits_opcode, // 
@[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_0_a_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_anon_out_0_a_bits_size, // @[LazyModuleImp.scala:107:25] output [6:0] auto_anon_out_0_a_bits_source, // @[LazyModuleImp.scala:107:25] output [13:0] auto_anon_out_0_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_0_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_0_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_0_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_0_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_0_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_0_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_anon_out_0_d_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_anon_out_0_d_bits_size, // @[LazyModuleImp.scala:107:25] input [6:0] auto_anon_out_0_d_bits_source, // @[LazyModuleImp.scala:107:25] input auto_anon_out_0_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_anon_out_0_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_0_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_0_d_bits_corrupt // @[LazyModuleImp.scala:107:25] ); wire out_7_d_bits_sink; // @[Xbar.scala:216:19] wire [3:0] out_7_d_bits_size; // @[Xbar.scala:216:19] wire [3:0] out_6_d_bits_size; // @[Xbar.scala:216:19] wire out_5_d_bits_sink; // @[Xbar.scala:216:19] wire [3:0] out_5_d_bits_size; // @[Xbar.scala:216:19] wire [3:0] out_4_d_bits_size; // @[Xbar.scala:216:19] wire [3:0] out_3_d_bits_size; // @[Xbar.scala:216:19] wire [3:0] out_2_d_bits_size; // @[Xbar.scala:216:19] wire out_1_d_bits_sink; // @[Xbar.scala:216:19] wire [3:0] out_1_d_bits_size; // @[Xbar.scala:216:19] wire out_0_d_bits_sink; // @[Xbar.scala:216:19] wire [6:0] in_0_d_bits_source; // @[Xbar.scala:159:18] wire [6:0] in_0_a_bits_source; // @[Xbar.scala:159:18] wire auto_anon_in_a_valid_0 = auto_anon_in_a_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_a_bits_opcode_0 = auto_anon_in_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_a_bits_param_0 = auto_anon_in_a_bits_param; // @[Xbar.scala:74:9] wire [3:0] auto_anon_in_a_bits_size_0 = auto_anon_in_a_bits_size; // @[Xbar.scala:74:9] wire [6:0] auto_anon_in_a_bits_source_0 = auto_anon_in_a_bits_source; // @[Xbar.scala:74:9] wire [31:0] auto_anon_in_a_bits_address_0 = auto_anon_in_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] auto_anon_in_a_bits_mask_0 = auto_anon_in_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] auto_anon_in_a_bits_data_0 = auto_anon_in_a_bits_data; // @[Xbar.scala:74:9] wire auto_anon_in_a_bits_corrupt_0 = auto_anon_in_a_bits_corrupt; // @[Xbar.scala:74:9] wire auto_anon_in_d_ready_0 = auto_anon_in_d_ready; // @[Xbar.scala:74:9] wire auto_anon_out_7_a_ready_0 = auto_anon_out_7_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_7_d_valid_0 = auto_anon_out_7_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_7_d_bits_opcode_0 = auto_anon_out_7_d_bits_opcode; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_7_d_bits_param_0 = auto_anon_out_7_d_bits_param; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_7_d_bits_size_0 = auto_anon_out_7_d_bits_size; // @[Xbar.scala:74:9] wire [6:0] auto_anon_out_7_d_bits_source_0 = auto_anon_out_7_d_bits_source; // @[Xbar.scala:74:9] wire auto_anon_out_7_d_bits_sink_0 = auto_anon_out_7_d_bits_sink; // @[Xbar.scala:74:9] wire auto_anon_out_7_d_bits_denied_0 = auto_anon_out_7_d_bits_denied; // @[Xbar.scala:74:9] 
wire [63:0] auto_anon_out_7_d_bits_data_0 = auto_anon_out_7_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_7_d_bits_corrupt_0 = auto_anon_out_7_d_bits_corrupt; // @[Xbar.scala:74:9] wire auto_anon_out_6_a_ready_0 = auto_anon_out_6_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_6_d_valid_0 = auto_anon_out_6_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_6_d_bits_size_0 = auto_anon_out_6_d_bits_size; // @[Xbar.scala:74:9] wire [6:0] auto_anon_out_6_d_bits_source_0 = auto_anon_out_6_d_bits_source; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_6_d_bits_data_0 = auto_anon_out_6_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_5_a_ready_0 = auto_anon_out_5_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_5_d_valid_0 = auto_anon_out_5_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_5_d_bits_opcode_0 = auto_anon_out_5_d_bits_opcode; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_5_d_bits_param_0 = auto_anon_out_5_d_bits_param; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_5_d_bits_size_0 = auto_anon_out_5_d_bits_size; // @[Xbar.scala:74:9] wire [6:0] auto_anon_out_5_d_bits_source_0 = auto_anon_out_5_d_bits_source; // @[Xbar.scala:74:9] wire auto_anon_out_5_d_bits_sink_0 = auto_anon_out_5_d_bits_sink; // @[Xbar.scala:74:9] wire auto_anon_out_5_d_bits_denied_0 = auto_anon_out_5_d_bits_denied; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_5_d_bits_data_0 = auto_anon_out_5_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_5_d_bits_corrupt_0 = auto_anon_out_5_d_bits_corrupt; // @[Xbar.scala:74:9] wire auto_anon_out_4_a_ready_0 = auto_anon_out_4_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_4_d_valid_0 = auto_anon_out_4_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_4_d_bits_opcode_0 = auto_anon_out_4_d_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_4_d_bits_size_0 = auto_anon_out_4_d_bits_size; // @[Xbar.scala:74:9] wire [6:0] auto_anon_out_4_d_bits_source_0 = auto_anon_out_4_d_bits_source; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_4_d_bits_data_0 = auto_anon_out_4_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_3_a_ready_0 = auto_anon_out_3_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_3_d_valid_0 = auto_anon_out_3_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_3_d_bits_opcode_0 = auto_anon_out_3_d_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_3_d_bits_size_0 = auto_anon_out_3_d_bits_size; // @[Xbar.scala:74:9] wire [6:0] auto_anon_out_3_d_bits_source_0 = auto_anon_out_3_d_bits_source; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_3_d_bits_data_0 = auto_anon_out_3_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_2_a_ready_0 = auto_anon_out_2_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_2_d_valid_0 = auto_anon_out_2_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_2_d_bits_opcode_0 = auto_anon_out_2_d_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_2_d_bits_size_0 = auto_anon_out_2_d_bits_size; // @[Xbar.scala:74:9] wire [6:0] auto_anon_out_2_d_bits_source_0 = auto_anon_out_2_d_bits_source; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_2_d_bits_data_0 = auto_anon_out_2_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_1_a_ready_0 = auto_anon_out_1_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_1_d_valid_0 = auto_anon_out_1_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_1_d_bits_opcode_0 = auto_anon_out_1_d_bits_opcode; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_1_d_bits_param_0 = auto_anon_out_1_d_bits_param; // @[Xbar.scala:74:9] wire [2:0] 
auto_anon_out_1_d_bits_size_0 = auto_anon_out_1_d_bits_size; // @[Xbar.scala:74:9] wire [6:0] auto_anon_out_1_d_bits_source_0 = auto_anon_out_1_d_bits_source; // @[Xbar.scala:74:9] wire auto_anon_out_1_d_bits_sink_0 = auto_anon_out_1_d_bits_sink; // @[Xbar.scala:74:9] wire auto_anon_out_1_d_bits_denied_0 = auto_anon_out_1_d_bits_denied; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_1_d_bits_data_0 = auto_anon_out_1_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_1_d_bits_corrupt_0 = auto_anon_out_1_d_bits_corrupt; // @[Xbar.scala:74:9] wire auto_anon_out_0_a_ready_0 = auto_anon_out_0_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_0_d_valid_0 = auto_anon_out_0_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_0_d_bits_opcode_0 = auto_anon_out_0_d_bits_opcode; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_0_d_bits_param_0 = auto_anon_out_0_d_bits_param; // @[Xbar.scala:74:9] wire [3:0] auto_anon_out_0_d_bits_size_0 = auto_anon_out_0_d_bits_size; // @[Xbar.scala:74:9] wire [6:0] auto_anon_out_0_d_bits_source_0 = auto_anon_out_0_d_bits_source; // @[Xbar.scala:74:9] wire auto_anon_out_0_d_bits_sink_0 = auto_anon_out_0_d_bits_sink; // @[Xbar.scala:74:9] wire auto_anon_out_0_d_bits_denied_0 = auto_anon_out_0_d_bits_denied; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_0_d_bits_data_0 = auto_anon_out_0_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_0_d_bits_corrupt_0 = auto_anon_out_0_d_bits_corrupt; // @[Xbar.scala:74:9] wire _readys_T_2 = reset; // @[Arbiter.scala:22:12] wire [2:0] auto_anon_out_6_d_bits_opcode = 3'h1; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_5_d_bits_opcode = 3'h1; // @[MixedNode.scala:542:17] wire [2:0] out_6_d_bits_opcode = 3'h1; // @[Xbar.scala:216:19] wire [2:0] portsDIO_filtered_6_0_bits_opcode = 3'h1; // @[Xbar.scala:352:24] wire [1:0] auto_anon_out_6_d_bits_param = 2'h0; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_4_d_bits_param = 2'h0; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_3_d_bits_param = 2'h0; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_2_d_bits_param = 2'h0; // @[Xbar.scala:74:9] wire [1:0] x1_anonOut_1_d_bits_param = 2'h0; // @[MixedNode.scala:542:17] wire [1:0] x1_anonOut_2_d_bits_param = 2'h0; // @[MixedNode.scala:542:17] wire [1:0] x1_anonOut_3_d_bits_param = 2'h0; // @[MixedNode.scala:542:17] wire [1:0] x1_anonOut_5_d_bits_param = 2'h0; // @[MixedNode.scala:542:17] wire [1:0] out_2_d_bits_param = 2'h0; // @[Xbar.scala:216:19] wire [1:0] out_3_d_bits_param = 2'h0; // @[Xbar.scala:216:19] wire [1:0] out_4_d_bits_param = 2'h0; // @[Xbar.scala:216:19] wire [1:0] out_6_d_bits_param = 2'h0; // @[Xbar.scala:216:19] wire [1:0] _requestBOI_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _requestBOI_WIRE_2_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_3_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _requestBOI_WIRE_4_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_5_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _requestBOI_WIRE_6_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_7_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _requestBOI_WIRE_8_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_9_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _requestBOI_WIRE_10_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_11_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] 
_requestBOI_WIRE_12_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_13_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _requestBOI_WIRE_14_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_15_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_2_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_3_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_4_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_5_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_6_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_7_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_8_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_9_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_10_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_11_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_12_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_13_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_14_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_15_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _portsBIO_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _portsBIO_WIRE_2_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_3_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_1_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _portsBIO_WIRE_4_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_5_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_2_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _portsBIO_WIRE_6_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_7_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_3_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _portsBIO_WIRE_8_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_9_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_4_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _portsBIO_WIRE_10_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_11_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_5_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _portsBIO_WIRE_12_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_13_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_6_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _portsBIO_WIRE_14_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_15_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_7_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] portsDIO_filtered_2_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] portsDIO_filtered_3_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] portsDIO_filtered_4_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] portsDIO_filtered_6_0_bits_param = 2'h0; // 
@[Xbar.scala:352:24] wire [1:0] _in_0_d_bits_T_92 = 2'h0; // @[Mux.scala:30:73] wire [1:0] _in_0_d_bits_T_93 = 2'h0; // @[Mux.scala:30:73] wire [1:0] _in_0_d_bits_T_94 = 2'h0; // @[Mux.scala:30:73] wire [1:0] _in_0_d_bits_T_96 = 2'h0; // @[Mux.scala:30:73] wire auto_anon_out_6_d_bits_sink = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_6_d_bits_denied = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_6_d_bits_corrupt = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_4_d_bits_sink = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_4_d_bits_denied = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_4_d_bits_corrupt = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_3_d_bits_sink = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_3_d_bits_denied = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_3_d_bits_corrupt = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_2_d_bits_sink = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_2_d_bits_denied = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_2_d_bits_corrupt = 1'h0; // @[Xbar.scala:74:9] wire x1_anonOut_1_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_1_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_1_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_2_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_2_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_2_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_3_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_3_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_3_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_5_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_5_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_5_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire out_2_d_bits_sink = 1'h0; // @[Xbar.scala:216:19] wire out_2_d_bits_denied = 1'h0; // @[Xbar.scala:216:19] wire out_2_d_bits_corrupt = 1'h0; // @[Xbar.scala:216:19] wire out_3_d_bits_sink = 1'h0; // @[Xbar.scala:216:19] wire out_3_d_bits_denied = 1'h0; // @[Xbar.scala:216:19] wire out_3_d_bits_corrupt = 1'h0; // @[Xbar.scala:216:19] wire out_4_d_bits_sink = 1'h0; // @[Xbar.scala:216:19] wire out_4_d_bits_denied = 1'h0; // @[Xbar.scala:216:19] wire out_4_d_bits_corrupt = 1'h0; // @[Xbar.scala:216:19] wire out_6_d_bits_sink = 1'h0; // @[Xbar.scala:216:19] wire out_6_d_bits_denied = 1'h0; // @[Xbar.scala:216:19] wire out_6_d_bits_corrupt = 1'h0; // @[Xbar.scala:216:19] wire _out_2_d_bits_sink_T = 1'h0; // @[Xbar.scala:251:53] wire _out_3_d_bits_sink_T = 1'h0; // @[Xbar.scala:251:53] wire _out_4_d_bits_sink_T = 1'h0; // @[Xbar.scala:251:53] wire _out_6_d_bits_sink_T = 1'h0; // @[Xbar.scala:251:53] wire _addressC_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _requestBOI_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] 
wire _requestBOI_T = 1'h0; // @[Parameters.scala:54:10] wire _requestBOI_WIRE_2_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_2_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_3_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_3_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_5 = 1'h0; // @[Parameters.scala:54:10] wire _requestBOI_WIRE_4_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_4_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_5_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_5_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_10 = 1'h0; // @[Parameters.scala:54:10] wire _requestBOI_WIRE_6_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_6_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_6_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_7_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_7_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_7_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_15 = 1'h0; // @[Parameters.scala:54:10] wire _requestBOI_WIRE_8_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_8_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_8_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_9_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_9_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_9_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_20 = 1'h0; // @[Parameters.scala:54:10] wire _requestBOI_WIRE_10_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_10_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_10_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_11_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_11_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_11_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_25 = 1'h0; // @[Parameters.scala:54:10] wire _requestBOI_WIRE_12_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_12_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_12_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_13_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_13_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_13_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_30 = 1'h0; // @[Parameters.scala:54:10] wire _requestBOI_WIRE_14_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_14_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_14_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_15_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_15_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_15_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_35 = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T_5 = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T_10 = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T_15 = 1'h0; // @[Parameters.scala:54:10] wire 
_requestDOI_T_20 = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T_25 = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T_30 = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T_35 = 1'h0; // @[Parameters.scala:54:10] wire _requestEIO_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_2_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_2_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_2_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_3_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_3_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_3_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_4_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_4_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_4_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_5_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_5_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_5_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_6_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_6_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_6_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_7_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_7_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_7_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_8_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_8_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_8_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_9_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_9_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_9_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_10_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_10_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_10_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_11_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_11_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_11_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_12_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_12_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_12_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_13_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_13_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_13_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_14_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_14_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_14_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_15_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_15_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_15_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _beatsBO_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire 
_beatsBO_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T = 1'h0; // @[Edges.scala:97:37] wire _beatsBO_WIRE_2_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_2_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_3_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_3_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T_1 = 1'h0; // @[Edges.scala:97:37] wire _beatsBO_WIRE_4_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_4_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_5_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_5_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T_2 = 1'h0; // @[Edges.scala:97:37] wire _beatsBO_WIRE_6_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_6_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_6_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_7_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_7_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_7_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T_3 = 1'h0; // @[Edges.scala:97:37] wire _beatsBO_WIRE_8_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_8_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_8_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_9_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_9_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_9_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T_4 = 1'h0; // @[Edges.scala:97:37] wire _beatsBO_WIRE_10_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_10_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_10_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_11_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_11_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_11_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T_5 = 1'h0; // @[Edges.scala:97:37] wire _beatsBO_WIRE_12_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_12_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_12_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_13_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_13_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_13_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T_6 = 1'h0; // @[Edges.scala:97:37] wire _beatsBO_WIRE_14_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_14_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_14_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_15_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_15_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_15_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T_7 = 1'h0; // @[Edges.scala:97:37] wire _beatsCI_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire 
_beatsCI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _beatsCI_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _beatsCI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire beatsCI_opdata = 1'h0; // @[Edges.scala:102:36] wire _beatsEI_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire _beatsEI_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire _beatsEI_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _beatsEI_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61] wire _beatsEI_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61] wire _beatsEI_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _portsBIO_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsBIO_WIRE_2_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_2_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_3_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_3_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_1_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_1_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_1_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_3 = 1'h0; // @[Xbar.scala:355:40] wire _portsBIO_WIRE_4_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_4_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_5_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_5_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_2_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_2_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_2_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_5 = 1'h0; // @[Xbar.scala:355:40] wire _portsBIO_WIRE_6_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_6_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_6_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_7_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_7_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_7_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_3_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_3_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_3_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_7 = 1'h0; // @[Xbar.scala:355:40] wire _portsBIO_WIRE_8_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_8_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_8_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_9_ready = 1'h0; // 
@[Bundles.scala:264:61] wire _portsBIO_WIRE_9_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_9_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_4_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_4_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_4_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_9 = 1'h0; // @[Xbar.scala:355:40] wire _portsBIO_WIRE_10_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_10_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_10_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_11_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_11_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_11_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_5_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_5_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_5_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_11 = 1'h0; // @[Xbar.scala:355:40] wire _portsBIO_WIRE_12_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_12_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_12_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_13_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_13_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_13_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_6_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_6_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_6_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_13 = 1'h0; // @[Xbar.scala:355:40] wire _portsBIO_WIRE_14_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_14_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_14_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_15_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_15_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_15_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_7_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_7_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_7_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_15 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _portsCOI_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _portsCOI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _portsCOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _portsCOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _portsCOI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire portsCOI_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_1_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_1_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_1_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_2_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_2_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_2_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_3_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_3_valid = 1'h0; // @[Xbar.scala:352:24] wire 
portsCOI_filtered_3_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_4_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_4_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_4_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_5_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_5_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_5_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_6_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_6_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_6_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_7_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_7_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_7_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsCOI_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_filtered_1_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_filtered_2_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_filtered_3_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_filtered_4_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_filtered_5_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_filtered_6_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_filtered_7_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_T = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_1 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_2 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_3 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_4 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_5 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_6 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_7 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_8 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_9 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_10 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_11 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_12 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_13 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_14 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_WIRE_2 = 1'h0; // @[Mux.scala:30:73] wire portsDIO_filtered_2_0_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_2_0_bits_denied = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_2_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_3_0_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_3_0_bits_denied = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_3_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_4_0_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_4_0_bits_denied = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_4_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_6_0_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_6_0_bits_denied = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_6_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsEOI_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire _portsEOI_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire _portsEOI_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _portsEOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61] wire _portsEOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61] wire _portsEOI_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire portsEOI_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24] wire 
portsEOI_filtered_0_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_1_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_1_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_1_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_2_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_2_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_2_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_3_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_3_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_3_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_4_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_4_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_4_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_5_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_5_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_5_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_6_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_6_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_6_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_7_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_7_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_7_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire _portsEOI_filtered_0_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_filtered_1_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_1_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_filtered_2_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_2_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_filtered_3_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_3_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_filtered_4_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_4_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_filtered_5_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_5_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_filtered_6_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_6_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_filtered_7_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_7_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_T = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_1 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_2 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_3 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_4 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_5 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_6 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_7 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_8 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_9 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_10 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_11 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_12 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_13 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_14 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_WIRE_2 = 1'h0; // @[Mux.scala:30:73] wire _state_WIRE_0 = 1'h0; // @[Arbiter.scala:88:34] wire _state_WIRE_1 = 1'h0; // @[Arbiter.scala:88:34] wire _state_WIRE_2 = 1'h0; // @[Arbiter.scala:88:34] wire _state_WIRE_3 = 1'h0; // @[Arbiter.scala:88:34] wire _state_WIRE_4 = 1'h0; // @[Arbiter.scala:88:34] wire 
_state_WIRE_5 = 1'h0; // @[Arbiter.scala:88:34] wire _state_WIRE_6 = 1'h0; // @[Arbiter.scala:88:34] wire _state_WIRE_7 = 1'h0; // @[Arbiter.scala:88:34] wire _in_0_d_bits_T_2 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_3 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_4 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_6 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_32 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_33 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_34 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_36 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_47 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_48 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_49 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_51 = 1'h0; // @[Mux.scala:30:73] wire _requestCIO_T_4 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_0 = 1'h1; // @[Xbar.scala:308:107] wire _requestCIO_T_9 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_1 = 1'h1; // @[Xbar.scala:308:107] wire _requestCIO_T_14 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_2 = 1'h1; // @[Xbar.scala:308:107] wire _requestCIO_T_19 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_3 = 1'h1; // @[Xbar.scala:308:107] wire _requestCIO_T_24 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_4 = 1'h1; // @[Xbar.scala:308:107] wire _requestCIO_T_29 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_5 = 1'h1; // @[Xbar.scala:308:107] wire _requestCIO_T_34 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_6 = 1'h1; // @[Xbar.scala:308:107] wire _requestCIO_T_39 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_7 = 1'h1; // @[Xbar.scala:308:107] wire _requestBOI_T_1 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_3 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_4 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_0_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestBOI_T_6 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_7 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_8 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_9 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_1_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestBOI_T_11 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_12 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_13 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_14 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_2_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestBOI_T_16 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_17 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_18 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_19 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_3_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestBOI_T_21 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_22 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_23 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_24 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_4_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestBOI_T_26 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_27 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_28 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_29 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_5_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestBOI_T_31 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_32 = 1'h1; // 
@[Parameters.scala:56:32] wire _requestBOI_T_33 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_34 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_6_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestBOI_T_36 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_37 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_38 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_39 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_7_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_1 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_3 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_4 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_0_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_6 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_7 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_8 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_9 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_1_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_11 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_12 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_13 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_14 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_2_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_16 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_17 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_18 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_19 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_3_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_21 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_22 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_23 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_24 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_4_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_26 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_27 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_28 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_29 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_5_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_31 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_32 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_33 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_34 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_6_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_36 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_37 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_38 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_39 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_7_0 = 1'h1; // @[Parameters.scala:56:48] wire beatsBO_opdata = 1'h1; // @[Edges.scala:97:28] wire beatsBO_opdata_1 = 1'h1; // @[Edges.scala:97:28] wire beatsBO_opdata_2 = 1'h1; // @[Edges.scala:97:28] wire beatsBO_opdata_3 = 1'h1; // @[Edges.scala:97:28] wire beatsBO_opdata_4 = 1'h1; // @[Edges.scala:97:28] wire beatsBO_opdata_5 = 1'h1; // @[Edges.scala:97:28] wire beatsBO_opdata_6 = 1'h1; // @[Edges.scala:97:28] wire beatsBO_opdata_7 = 1'h1; // @[Edges.scala:97:28] wire beatsDO_opdata_6 = 1'h1; // @[Edges.scala:106:36] wire _portsBIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsBIO_filtered_0_valid_T_2 = 1'h1; // @[Xbar.scala:355:54] wire _portsBIO_filtered_0_valid_T_4 = 1'h1; // @[Xbar.scala:355:54] wire _portsBIO_filtered_0_valid_T_6 = 
1'h1; // @[Xbar.scala:355:54] wire _portsBIO_filtered_0_valid_T_8 = 1'h1; // @[Xbar.scala:355:54] wire _portsBIO_filtered_0_valid_T_10 = 1'h1; // @[Xbar.scala:355:54] wire _portsBIO_filtered_0_valid_T_12 = 1'h1; // @[Xbar.scala:355:54] wire _portsBIO_filtered_0_valid_T_14 = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_1_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_2_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_3_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_4_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_5_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_6_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_7_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T_2 = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T_4 = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T_6 = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T_8 = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T_10 = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T_12 = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T_14 = 1'h1; // @[Xbar.scala:355:54] wire [63:0] _addressC_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _addressC_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _requestBOI_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _requestBOI_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _requestBOI_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _requestBOI_WIRE_6_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_7_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _requestBOI_WIRE_8_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_9_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _requestBOI_WIRE_10_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_11_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _requestBOI_WIRE_12_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_13_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _requestBOI_WIRE_14_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_15_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_6_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_7_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_8_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_9_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_10_bits_data = 64'h0; // 
@[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_11_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_12_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_13_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_14_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_15_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsCI_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _beatsCI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _portsBIO_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsBIO_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_1_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsBIO_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_2_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsBIO_WIRE_6_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_7_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_3_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsBIO_WIRE_8_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_9_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_4_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsBIO_WIRE_10_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_11_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_5_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsBIO_WIRE_12_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_13_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_6_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsBIO_WIRE_14_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_15_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_7_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsCOI_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _portsCOI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] portsCOI_filtered_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] portsCOI_filtered_1_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] portsCOI_filtered_2_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] portsCOI_filtered_3_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] portsCOI_filtered_4_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] portsCOI_filtered_5_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] portsCOI_filtered_6_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] portsCOI_filtered_7_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [31:0] _addressC_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _addressC_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _requestCIO_T = 32'h0; // @[Parameters.scala:137:31] wire [31:0] _requestCIO_T_5 = 32'h0; // @[Parameters.scala:137:31] wire [31:0] _requestCIO_T_10 = 32'h0; // @[Parameters.scala:137:31] wire [31:0] _requestCIO_T_15 = 32'h0; // @[Parameters.scala:137:31] wire [31:0] 
_requestCIO_T_20 = 32'h0; // @[Parameters.scala:137:31] wire [31:0] _requestCIO_T_25 = 32'h0; // @[Parameters.scala:137:31] wire [31:0] _requestCIO_T_30 = 32'h0; // @[Parameters.scala:137:31] wire [31:0] _requestCIO_T_35 = 32'h0; // @[Parameters.scala:137:31] wire [31:0] _requestBOI_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _requestBOI_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _requestBOI_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _requestBOI_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _requestBOI_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _requestBOI_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _requestBOI_WIRE_6_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _requestBOI_WIRE_7_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _requestBOI_WIRE_8_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _requestBOI_WIRE_9_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _requestBOI_WIRE_10_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _requestBOI_WIRE_11_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _requestBOI_WIRE_12_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _requestBOI_WIRE_13_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _requestBOI_WIRE_14_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _requestBOI_WIRE_15_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _beatsBO_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _beatsBO_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _beatsBO_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _beatsBO_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _beatsBO_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _beatsBO_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _beatsBO_WIRE_6_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _beatsBO_WIRE_7_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _beatsBO_WIRE_8_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _beatsBO_WIRE_9_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _beatsBO_WIRE_10_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _beatsBO_WIRE_11_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _beatsBO_WIRE_12_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _beatsBO_WIRE_13_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _beatsBO_WIRE_14_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _beatsBO_WIRE_15_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] _beatsCI_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _beatsCI_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _portsBIO_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _portsBIO_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] portsBIO_filtered_0_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [31:0] _portsBIO_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _portsBIO_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] portsBIO_filtered_1_0_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [31:0] _portsBIO_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _portsBIO_WIRE_5_bits_address = 32'h0; // 
@[Bundles.scala:264:61] wire [31:0] portsBIO_filtered_2_0_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [31:0] _portsBIO_WIRE_6_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _portsBIO_WIRE_7_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] portsBIO_filtered_3_0_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [31:0] _portsBIO_WIRE_8_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _portsBIO_WIRE_9_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] portsBIO_filtered_4_0_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [31:0] _portsBIO_WIRE_10_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _portsBIO_WIRE_11_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] portsBIO_filtered_5_0_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [31:0] _portsBIO_WIRE_12_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _portsBIO_WIRE_13_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] portsBIO_filtered_6_0_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [31:0] _portsBIO_WIRE_14_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] _portsBIO_WIRE_15_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] portsBIO_filtered_7_0_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [31:0] _portsCOI_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _portsCOI_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] portsCOI_filtered_0_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [31:0] portsCOI_filtered_1_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [31:0] portsCOI_filtered_2_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [31:0] portsCOI_filtered_3_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [31:0] portsCOI_filtered_4_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [31:0] portsCOI_filtered_5_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [31:0] portsCOI_filtered_6_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [31:0] portsCOI_filtered_7_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [6:0] _addressC_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _addressC_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _requestBOI_WIRE_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _requestBOI_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] _requestBOI_uncommonBits_T = 7'h0; // @[Parameters.scala:52:29] wire [6:0] requestBOI_uncommonBits = 7'h0; // @[Parameters.scala:52:56] wire [6:0] _requestBOI_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _requestBOI_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] _requestBOI_uncommonBits_T_1 = 7'h0; // @[Parameters.scala:52:29] wire [6:0] requestBOI_uncommonBits_1 = 7'h0; // @[Parameters.scala:52:56] wire [6:0] _requestBOI_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _requestBOI_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] _requestBOI_uncommonBits_T_2 = 7'h0; // @[Parameters.scala:52:29] wire [6:0] requestBOI_uncommonBits_2 = 7'h0; // @[Parameters.scala:52:56] wire [6:0] _requestBOI_WIRE_6_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _requestBOI_WIRE_7_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] _requestBOI_uncommonBits_T_3 = 7'h0; // @[Parameters.scala:52:29] wire [6:0] requestBOI_uncommonBits_3 = 7'h0; // @[Parameters.scala:52:56] wire [6:0] _requestBOI_WIRE_8_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _requestBOI_WIRE_9_bits_source = 
7'h0; // @[Bundles.scala:264:61] wire [6:0] _requestBOI_uncommonBits_T_4 = 7'h0; // @[Parameters.scala:52:29] wire [6:0] requestBOI_uncommonBits_4 = 7'h0; // @[Parameters.scala:52:56] wire [6:0] _requestBOI_WIRE_10_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _requestBOI_WIRE_11_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] _requestBOI_uncommonBits_T_5 = 7'h0; // @[Parameters.scala:52:29] wire [6:0] requestBOI_uncommonBits_5 = 7'h0; // @[Parameters.scala:52:56] wire [6:0] _requestBOI_WIRE_12_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _requestBOI_WIRE_13_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] _requestBOI_uncommonBits_T_6 = 7'h0; // @[Parameters.scala:52:29] wire [6:0] requestBOI_uncommonBits_6 = 7'h0; // @[Parameters.scala:52:56] wire [6:0] _requestBOI_WIRE_14_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _requestBOI_WIRE_15_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] _requestBOI_uncommonBits_T_7 = 7'h0; // @[Parameters.scala:52:29] wire [6:0] requestBOI_uncommonBits_7 = 7'h0; // @[Parameters.scala:52:56] wire [6:0] _beatsBO_WIRE_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _beatsBO_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] _beatsBO_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _beatsBO_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] _beatsBO_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _beatsBO_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] _beatsBO_WIRE_6_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _beatsBO_WIRE_7_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] _beatsBO_WIRE_8_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _beatsBO_WIRE_9_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] _beatsBO_WIRE_10_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _beatsBO_WIRE_11_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] _beatsBO_WIRE_12_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _beatsBO_WIRE_13_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] _beatsBO_WIRE_14_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _beatsBO_WIRE_15_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] _beatsCI_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _beatsCI_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _portsBIO_WIRE_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _portsBIO_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] portsBIO_filtered_0_bits_source = 7'h0; // @[Xbar.scala:352:24] wire [6:0] _portsBIO_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _portsBIO_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] portsBIO_filtered_1_0_bits_source = 7'h0; // @[Xbar.scala:352:24] wire [6:0] _portsBIO_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _portsBIO_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] portsBIO_filtered_2_0_bits_source = 7'h0; // @[Xbar.scala:352:24] wire [6:0] _portsBIO_WIRE_6_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _portsBIO_WIRE_7_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] portsBIO_filtered_3_0_bits_source = 7'h0; // @[Xbar.scala:352:24] wire [6:0] _portsBIO_WIRE_8_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _portsBIO_WIRE_9_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] portsBIO_filtered_4_0_bits_source = 7'h0; // 
@[Xbar.scala:352:24] wire [6:0] _portsBIO_WIRE_10_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _portsBIO_WIRE_11_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] portsBIO_filtered_5_0_bits_source = 7'h0; // @[Xbar.scala:352:24] wire [6:0] _portsBIO_WIRE_12_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _portsBIO_WIRE_13_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] portsBIO_filtered_6_0_bits_source = 7'h0; // @[Xbar.scala:352:24] wire [6:0] _portsBIO_WIRE_14_bits_source = 7'h0; // @[Bundles.scala:264:74] wire [6:0] _portsBIO_WIRE_15_bits_source = 7'h0; // @[Bundles.scala:264:61] wire [6:0] portsBIO_filtered_7_0_bits_source = 7'h0; // @[Xbar.scala:352:24] wire [6:0] _portsCOI_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _portsCOI_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] portsCOI_filtered_0_bits_source = 7'h0; // @[Xbar.scala:352:24] wire [6:0] portsCOI_filtered_1_bits_source = 7'h0; // @[Xbar.scala:352:24] wire [6:0] portsCOI_filtered_2_bits_source = 7'h0; // @[Xbar.scala:352:24] wire [6:0] portsCOI_filtered_3_bits_source = 7'h0; // @[Xbar.scala:352:24] wire [6:0] portsCOI_filtered_4_bits_source = 7'h0; // @[Xbar.scala:352:24] wire [6:0] portsCOI_filtered_5_bits_source = 7'h0; // @[Xbar.scala:352:24] wire [6:0] portsCOI_filtered_6_bits_source = 7'h0; // @[Xbar.scala:352:24] wire [6:0] portsCOI_filtered_7_bits_source = 7'h0; // @[Xbar.scala:352:24] wire [3:0] _addressC_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _addressC_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _requestBOI_WIRE_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _requestBOI_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _requestBOI_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _requestBOI_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _requestBOI_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _requestBOI_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _requestBOI_WIRE_6_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _requestBOI_WIRE_7_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _requestBOI_WIRE_8_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _requestBOI_WIRE_9_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _requestBOI_WIRE_10_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _requestBOI_WIRE_11_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _requestBOI_WIRE_12_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _requestBOI_WIRE_13_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _requestBOI_WIRE_14_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _requestBOI_WIRE_15_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _beatsBO_WIRE_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _beatsBO_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _beatsBO_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _beatsBO_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _beatsBO_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _beatsBO_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _beatsBO_WIRE_6_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _beatsBO_WIRE_7_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _beatsBO_WIRE_8_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _beatsBO_WIRE_9_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] 
_beatsBO_WIRE_10_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _beatsBO_WIRE_11_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _beatsBO_WIRE_12_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _beatsBO_WIRE_13_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _beatsBO_WIRE_14_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _beatsBO_WIRE_15_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _beatsCI_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _beatsCI_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _portsBIO_WIRE_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _portsBIO_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] portsBIO_filtered_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] _portsBIO_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _portsBIO_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] portsBIO_filtered_1_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] _portsBIO_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _portsBIO_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] portsBIO_filtered_2_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] _portsBIO_WIRE_6_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _portsBIO_WIRE_7_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] portsBIO_filtered_3_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] _portsBIO_WIRE_8_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _portsBIO_WIRE_9_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] portsBIO_filtered_4_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] _portsBIO_WIRE_10_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _portsBIO_WIRE_11_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] portsBIO_filtered_5_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] _portsBIO_WIRE_12_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _portsBIO_WIRE_13_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] portsBIO_filtered_6_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] _portsBIO_WIRE_14_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _portsBIO_WIRE_15_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] portsBIO_filtered_7_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] _portsCOI_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _portsCOI_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] portsCOI_filtered_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] portsCOI_filtered_1_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] portsCOI_filtered_2_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] portsCOI_filtered_3_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] portsCOI_filtered_4_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] portsCOI_filtered_5_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] portsCOI_filtered_6_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] portsCOI_filtered_7_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [2:0] _addressC_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _addressC_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _addressC_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _addressC_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _requestBOI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_2_bits_opcode 
= 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_6_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_7_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_8_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_9_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_10_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_11_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_12_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_13_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_14_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_15_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _beatsBO_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _beatsBO_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode_1 = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsBO_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode_2 = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_2 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsBO_WIRE_6_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_7_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode_3 = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_3 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsBO_WIRE_8_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_9_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode_4 = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_4 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsBO_WIRE_10_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_11_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode_5 = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_5 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsBO_WIRE_12_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_13_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode_6 = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_6 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsBO_WIRE_14_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_15_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode_7 = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_7 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsCI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _beatsCI_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _beatsCI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _beatsCI_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _portsBIO_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire 
[2:0] _portsBIO_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_1_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsBIO_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_2_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsBIO_WIRE_6_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_7_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_3_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsBIO_WIRE_8_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_9_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_4_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsBIO_WIRE_10_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_11_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_5_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsBIO_WIRE_12_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_13_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_6_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsBIO_WIRE_14_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_15_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_7_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsCOI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _portsCOI_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _portsCOI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _portsCOI_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] portsCOI_filtered_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_0_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_1_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_1_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_2_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_2_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_3_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_3_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_4_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_4_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_5_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_5_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_6_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_6_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_7_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_7_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [7:0] _requestBOI_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_WIRE_2_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_3_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_WIRE_4_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_5_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] 
_requestBOI_WIRE_6_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_7_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_WIRE_8_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_9_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_WIRE_10_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_11_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_WIRE_12_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_13_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_WIRE_14_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_15_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_2_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_3_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_4_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_5_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_6_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_7_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_8_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_9_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_10_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_11_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_12_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_13_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_14_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_15_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _portsBIO_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsBIO_WIRE_2_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_3_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_1_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsBIO_WIRE_4_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_5_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_2_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsBIO_WIRE_6_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_7_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_3_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsBIO_WIRE_8_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_9_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_4_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsBIO_WIRE_10_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_11_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_5_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsBIO_WIRE_12_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_13_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_6_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsBIO_WIRE_14_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] 
_portsBIO_WIRE_15_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_7_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [8:0] beatsBO_decode = 9'h0; // @[Edges.scala:220:59] wire [8:0] beatsBO_0 = 9'h0; // @[Edges.scala:221:14] wire [8:0] beatsCI_decode = 9'h0; // @[Edges.scala:220:59] wire [8:0] beatsCI_0 = 9'h0; // @[Edges.scala:221:14] wire [11:0] _beatsBO_decode_T_2 = 12'h0; // @[package.scala:243:46] wire [11:0] _beatsCI_decode_T_2 = 12'h0; // @[package.scala:243:46] wire [11:0] _beatsBO_decode_T_1 = 12'hFFF; // @[package.scala:243:76] wire [11:0] _beatsCI_decode_T_1 = 12'hFFF; // @[package.scala:243:76] wire [26:0] _beatsBO_decode_T = 27'hFFF; // @[package.scala:243:71] wire [26:0] _beatsCI_decode_T = 27'hFFF; // @[package.scala:243:71] wire [5:0] _beatsBO_decode_T_5 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsBO_decode_T_8 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsBO_decode_T_11 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsBO_decode_T_14 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsBO_decode_T_17 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsBO_decode_T_20 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsBO_decode_T_23 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsBO_decode_T_4 = 6'h3F; // @[package.scala:243:76] wire [5:0] _beatsBO_decode_T_7 = 6'h3F; // @[package.scala:243:76] wire [5:0] _beatsBO_decode_T_10 = 6'h3F; // @[package.scala:243:76] wire [5:0] _beatsBO_decode_T_13 = 6'h3F; // @[package.scala:243:76] wire [5:0] _beatsBO_decode_T_16 = 6'h3F; // @[package.scala:243:76] wire [5:0] _beatsBO_decode_T_19 = 6'h3F; // @[package.scala:243:76] wire [5:0] _beatsBO_decode_T_22 = 6'h3F; // @[package.scala:243:76] wire [20:0] _beatsBO_decode_T_3 = 21'h3F; // @[package.scala:243:71] wire [20:0] _beatsBO_decode_T_6 = 21'h3F; // @[package.scala:243:71] wire [20:0] _beatsBO_decode_T_9 = 21'h3F; // @[package.scala:243:71] wire [20:0] _beatsBO_decode_T_12 = 21'h3F; // @[package.scala:243:71] wire [20:0] _beatsBO_decode_T_15 = 21'h3F; // @[package.scala:243:71] wire [20:0] _beatsBO_decode_T_18 = 21'h3F; // @[package.scala:243:71] wire [20:0] _beatsBO_decode_T_21 = 21'h3F; // @[package.scala:243:71] wire [32:0] _requestCIO_T_1 = 33'h0; // @[Parameters.scala:137:41] wire [32:0] _requestCIO_T_2 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _requestCIO_T_3 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _requestCIO_T_6 = 33'h0; // @[Parameters.scala:137:41] wire [32:0] _requestCIO_T_7 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _requestCIO_T_8 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _requestCIO_T_11 = 33'h0; // @[Parameters.scala:137:41] wire [32:0] _requestCIO_T_12 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _requestCIO_T_13 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _requestCIO_T_16 = 33'h0; // @[Parameters.scala:137:41] wire [32:0] _requestCIO_T_17 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _requestCIO_T_18 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _requestCIO_T_21 = 33'h0; // @[Parameters.scala:137:41] wire [32:0] _requestCIO_T_22 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _requestCIO_T_23 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _requestCIO_T_26 = 33'h0; // @[Parameters.scala:137:41] wire [32:0] _requestCIO_T_27 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _requestCIO_T_28 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _requestCIO_T_31 = 33'h0; // @[Parameters.scala:137:41] wire [32:0] _requestCIO_T_32 = 33'h0; // 
@[Parameters.scala:137:46] wire [32:0] _requestCIO_T_33 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _requestCIO_T_36 = 33'h0; // @[Parameters.scala:137:41] wire [32:0] _requestCIO_T_37 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _requestCIO_T_38 = 33'h0; // @[Parameters.scala:137:46] wire anonIn_a_ready; // @[MixedNode.scala:551:17] wire anonIn_a_valid = auto_anon_in_a_valid_0; // @[Xbar.scala:74:9] wire [2:0] anonIn_a_bits_opcode = auto_anon_in_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] anonIn_a_bits_param = auto_anon_in_a_bits_param_0; // @[Xbar.scala:74:9] wire [3:0] anonIn_a_bits_size = auto_anon_in_a_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] anonIn_a_bits_source = auto_anon_in_a_bits_source_0; // @[Xbar.scala:74:9] wire [31:0] anonIn_a_bits_address = auto_anon_in_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] anonIn_a_bits_mask = auto_anon_in_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] anonIn_a_bits_data = auto_anon_in_a_bits_data_0; // @[Xbar.scala:74:9] wire anonIn_a_bits_corrupt = auto_anon_in_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire anonIn_d_ready = auto_anon_in_d_ready_0; // @[Xbar.scala:74:9] wire anonIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] anonIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] anonIn_d_bits_param; // @[MixedNode.scala:551:17] wire [3:0] anonIn_d_bits_size; // @[MixedNode.scala:551:17] wire [6:0] anonIn_d_bits_source; // @[MixedNode.scala:551:17] wire anonIn_d_bits_sink; // @[MixedNode.scala:551:17] wire anonIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [63:0] anonIn_d_bits_data; // @[MixedNode.scala:551:17] wire anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire x1_anonOut_6_a_ready = auto_anon_out_7_a_ready_0; // @[Xbar.scala:74:9] wire x1_anonOut_6_a_valid; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_6_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_6_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_6_a_bits_size; // @[MixedNode.scala:542:17] wire [6:0] x1_anonOut_6_a_bits_source; // @[MixedNode.scala:542:17] wire [20:0] x1_anonOut_6_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_6_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] x1_anonOut_6_a_bits_data; // @[MixedNode.scala:542:17] wire x1_anonOut_6_a_bits_corrupt; // @[MixedNode.scala:542:17] wire x1_anonOut_6_d_ready; // @[MixedNode.scala:542:17] wire x1_anonOut_6_d_valid = auto_anon_out_7_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_6_d_bits_opcode = auto_anon_out_7_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [1:0] x1_anonOut_6_d_bits_param = auto_anon_out_7_d_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_6_d_bits_size = auto_anon_out_7_d_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] x1_anonOut_6_d_bits_source = auto_anon_out_7_d_bits_source_0; // @[Xbar.scala:74:9] wire x1_anonOut_6_d_bits_sink = auto_anon_out_7_d_bits_sink_0; // @[Xbar.scala:74:9] wire x1_anonOut_6_d_bits_denied = auto_anon_out_7_d_bits_denied_0; // @[Xbar.scala:74:9] wire [63:0] x1_anonOut_6_d_bits_data = auto_anon_out_7_d_bits_data_0; // @[Xbar.scala:74:9] wire x1_anonOut_6_d_bits_corrupt = auto_anon_out_7_d_bits_corrupt_0; // @[Xbar.scala:74:9] wire x1_anonOut_5_a_ready = auto_anon_out_6_a_ready_0; // @[Xbar.scala:74:9] wire x1_anonOut_5_a_valid; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_5_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_5_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_5_a_bits_size; // @[MixedNode.scala:542:17] wire 
[6:0] x1_anonOut_5_a_bits_source; // @[MixedNode.scala:542:17] wire [16:0] x1_anonOut_5_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_5_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] x1_anonOut_5_a_bits_data; // @[MixedNode.scala:542:17] wire x1_anonOut_5_a_bits_corrupt; // @[MixedNode.scala:542:17] wire x1_anonOut_5_d_ready; // @[MixedNode.scala:542:17] wire x1_anonOut_5_d_valid = auto_anon_out_6_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_5_d_bits_size = auto_anon_out_6_d_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] x1_anonOut_5_d_bits_source = auto_anon_out_6_d_bits_source_0; // @[Xbar.scala:74:9] wire [63:0] x1_anonOut_5_d_bits_data = auto_anon_out_6_d_bits_data_0; // @[Xbar.scala:74:9] wire x1_anonOut_4_a_ready = auto_anon_out_5_a_ready_0; // @[Xbar.scala:74:9] wire x1_anonOut_4_a_valid; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_4_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_4_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_4_a_bits_size; // @[MixedNode.scala:542:17] wire [6:0] x1_anonOut_4_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] x1_anonOut_4_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_4_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] x1_anonOut_4_a_bits_data; // @[MixedNode.scala:542:17] wire x1_anonOut_4_a_bits_corrupt; // @[MixedNode.scala:542:17] wire x1_anonOut_4_d_ready; // @[MixedNode.scala:542:17] wire x1_anonOut_4_d_valid = auto_anon_out_5_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_4_d_bits_opcode = auto_anon_out_5_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [1:0] x1_anonOut_4_d_bits_param = auto_anon_out_5_d_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_4_d_bits_size = auto_anon_out_5_d_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] x1_anonOut_4_d_bits_source = auto_anon_out_5_d_bits_source_0; // @[Xbar.scala:74:9] wire x1_anonOut_4_d_bits_sink = auto_anon_out_5_d_bits_sink_0; // @[Xbar.scala:74:9] wire x1_anonOut_4_d_bits_denied = auto_anon_out_5_d_bits_denied_0; // @[Xbar.scala:74:9] wire [63:0] x1_anonOut_4_d_bits_data = auto_anon_out_5_d_bits_data_0; // @[Xbar.scala:74:9] wire x1_anonOut_4_d_bits_corrupt = auto_anon_out_5_d_bits_corrupt_0; // @[Xbar.scala:74:9] wire x1_anonOut_3_a_ready = auto_anon_out_4_a_ready_0; // @[Xbar.scala:74:9] wire x1_anonOut_3_a_valid; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_3_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_3_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_3_a_bits_size; // @[MixedNode.scala:542:17] wire [6:0] x1_anonOut_3_a_bits_source; // @[MixedNode.scala:542:17] wire [11:0] x1_anonOut_3_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_3_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] x1_anonOut_3_a_bits_data; // @[MixedNode.scala:542:17] wire x1_anonOut_3_a_bits_corrupt; // @[MixedNode.scala:542:17] wire x1_anonOut_3_d_ready; // @[MixedNode.scala:542:17] wire x1_anonOut_3_d_valid = auto_anon_out_4_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_3_d_bits_opcode = auto_anon_out_4_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_3_d_bits_size = auto_anon_out_4_d_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] x1_anonOut_3_d_bits_source = auto_anon_out_4_d_bits_source_0; // @[Xbar.scala:74:9] wire [63:0] x1_anonOut_3_d_bits_data = auto_anon_out_4_d_bits_data_0; // @[Xbar.scala:74:9] wire x1_anonOut_2_a_ready = auto_anon_out_3_a_ready_0; // @[Xbar.scala:74:9] wire x1_anonOut_2_a_valid; // 
@[MixedNode.scala:542:17] wire [2:0] x1_anonOut_2_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_2_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_2_a_bits_size; // @[MixedNode.scala:542:17] wire [6:0] x1_anonOut_2_a_bits_source; // @[MixedNode.scala:542:17] wire [27:0] x1_anonOut_2_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_2_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] x1_anonOut_2_a_bits_data; // @[MixedNode.scala:542:17] wire x1_anonOut_2_a_bits_corrupt; // @[MixedNode.scala:542:17] wire x1_anonOut_2_d_ready; // @[MixedNode.scala:542:17] wire x1_anonOut_2_d_valid = auto_anon_out_3_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_2_d_bits_opcode = auto_anon_out_3_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_2_d_bits_size = auto_anon_out_3_d_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] x1_anonOut_2_d_bits_source = auto_anon_out_3_d_bits_source_0; // @[Xbar.scala:74:9] wire [63:0] x1_anonOut_2_d_bits_data = auto_anon_out_3_d_bits_data_0; // @[Xbar.scala:74:9] wire x1_anonOut_1_a_ready = auto_anon_out_2_a_ready_0; // @[Xbar.scala:74:9] wire x1_anonOut_1_a_valid; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_1_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_1_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_1_a_bits_size; // @[MixedNode.scala:542:17] wire [6:0] x1_anonOut_1_a_bits_source; // @[MixedNode.scala:542:17] wire [25:0] x1_anonOut_1_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_1_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] x1_anonOut_1_a_bits_data; // @[MixedNode.scala:542:17] wire x1_anonOut_1_a_bits_corrupt; // @[MixedNode.scala:542:17] wire x1_anonOut_1_d_ready; // @[MixedNode.scala:542:17] wire x1_anonOut_1_d_valid = auto_anon_out_2_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_1_d_bits_opcode = auto_anon_out_2_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_1_d_bits_size = auto_anon_out_2_d_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] x1_anonOut_1_d_bits_source = auto_anon_out_2_d_bits_source_0; // @[Xbar.scala:74:9] wire [63:0] x1_anonOut_1_d_bits_data = auto_anon_out_2_d_bits_data_0; // @[Xbar.scala:74:9] wire x1_anonOut_a_ready = auto_anon_out_1_a_ready_0; // @[Xbar.scala:74:9] wire x1_anonOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_a_bits_size; // @[MixedNode.scala:542:17] wire [6:0] x1_anonOut_a_bits_source; // @[MixedNode.scala:542:17] wire [28:0] x1_anonOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] x1_anonOut_a_bits_data; // @[MixedNode.scala:542:17] wire x1_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire x1_anonOut_d_ready; // @[MixedNode.scala:542:17] wire x1_anonOut_d_valid = auto_anon_out_1_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_d_bits_opcode = auto_anon_out_1_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [1:0] x1_anonOut_d_bits_param = auto_anon_out_1_d_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_d_bits_size = auto_anon_out_1_d_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] x1_anonOut_d_bits_source = auto_anon_out_1_d_bits_source_0; // @[Xbar.scala:74:9] wire x1_anonOut_d_bits_sink = auto_anon_out_1_d_bits_sink_0; // @[Xbar.scala:74:9] wire x1_anonOut_d_bits_denied = auto_anon_out_1_d_bits_denied_0; // @[Xbar.scala:74:9] 
wire [63:0] x1_anonOut_d_bits_data = auto_anon_out_1_d_bits_data_0; // @[Xbar.scala:74:9] wire x1_anonOut_d_bits_corrupt = auto_anon_out_1_d_bits_corrupt_0; // @[Xbar.scala:74:9] wire anonOut_a_ready = auto_anon_out_0_a_ready_0; // @[Xbar.scala:74:9] wire anonOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] anonOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] anonOut_a_bits_param; // @[MixedNode.scala:542:17] wire [3:0] anonOut_a_bits_size; // @[MixedNode.scala:542:17] wire [6:0] anonOut_a_bits_source; // @[MixedNode.scala:542:17] wire [13:0] anonOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] anonOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] anonOut_a_bits_data; // @[MixedNode.scala:542:17] wire anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire anonOut_d_ready; // @[MixedNode.scala:542:17] wire anonOut_d_valid = auto_anon_out_0_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] anonOut_d_bits_opcode = auto_anon_out_0_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [1:0] anonOut_d_bits_param = auto_anon_out_0_d_bits_param_0; // @[Xbar.scala:74:9] wire [3:0] anonOut_d_bits_size = auto_anon_out_0_d_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] anonOut_d_bits_source = auto_anon_out_0_d_bits_source_0; // @[Xbar.scala:74:9] wire anonOut_d_bits_sink = auto_anon_out_0_d_bits_sink_0; // @[Xbar.scala:74:9] wire anonOut_d_bits_denied = auto_anon_out_0_d_bits_denied_0; // @[Xbar.scala:74:9] wire [63:0] anonOut_d_bits_data = auto_anon_out_0_d_bits_data_0; // @[Xbar.scala:74:9] wire anonOut_d_bits_corrupt = auto_anon_out_0_d_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_in_a_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [1:0] auto_anon_in_d_bits_param_0; // @[Xbar.scala:74:9] wire [3:0] auto_anon_in_d_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] auto_anon_in_d_bits_source_0; // @[Xbar.scala:74:9] wire auto_anon_in_d_bits_sink_0; // @[Xbar.scala:74:9] wire auto_anon_in_d_bits_denied_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_in_d_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_in_d_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_in_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_7_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_7_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_7_a_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] auto_anon_out_7_a_bits_source_0; // @[Xbar.scala:74:9] wire [20:0] auto_anon_out_7_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_7_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_7_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_7_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_7_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_7_d_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_6_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_6_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_6_a_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] auto_anon_out_6_a_bits_source_0; // @[Xbar.scala:74:9] wire [16:0] auto_anon_out_6_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_6_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_6_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_6_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_6_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_6_d_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_5_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] 
auto_anon_out_5_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_5_a_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] auto_anon_out_5_a_bits_source_0; // @[Xbar.scala:74:9] wire [31:0] auto_anon_out_5_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_5_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_5_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_5_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_5_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_5_d_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_4_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_4_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_4_a_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] auto_anon_out_4_a_bits_source_0; // @[Xbar.scala:74:9] wire [11:0] auto_anon_out_4_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_4_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_4_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_4_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_4_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_4_d_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_3_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_3_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_3_a_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] auto_anon_out_3_a_bits_source_0; // @[Xbar.scala:74:9] wire [27:0] auto_anon_out_3_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_3_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_3_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_3_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_3_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_3_d_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_2_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_2_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_2_a_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] auto_anon_out_2_a_bits_source_0; // @[Xbar.scala:74:9] wire [25:0] auto_anon_out_2_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_2_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_2_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_2_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_2_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_2_d_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_1_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_1_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_1_a_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] auto_anon_out_1_a_bits_source_0; // @[Xbar.scala:74:9] wire [28:0] auto_anon_out_1_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_1_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_1_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_1_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_1_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_1_d_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_0_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_0_a_bits_param_0; // @[Xbar.scala:74:9] wire [3:0] auto_anon_out_0_a_bits_size_0; // @[Xbar.scala:74:9] wire [6:0] auto_anon_out_0_a_bits_source_0; // @[Xbar.scala:74:9] wire [13:0] auto_anon_out_0_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_0_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_0_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_0_a_bits_corrupt_0; 
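// The wire declarations above mirror the auto_anon_out_* module ports. Below, the single
// inbound TileLink port in_0 (Xbar.scala:159) and the eight outbound ports out_0..out_7
// (Xbar.scala:216) are hooked up to the anonIn / anonOut / x1_anonOut_* node bundles.
// The crossbar appears to work on a common 32-bit address / 4-bit size view of every port:
// narrower egress address and size fields are driven from slices (Xbar.scala:222), while the
// egress D-channel size is zero-extended back to the ingress width (Xbar.scala:250).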
// @[Xbar.scala:74:9] wire auto_anon_out_0_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_0_d_ready_0; // @[Xbar.scala:74:9] wire in_0_a_ready; // @[Xbar.scala:159:18] assign auto_anon_in_a_ready_0 = anonIn_a_ready; // @[Xbar.scala:74:9] wire in_0_a_valid = anonIn_a_valid; // @[Xbar.scala:159:18] wire [2:0] in_0_a_bits_opcode = anonIn_a_bits_opcode; // @[Xbar.scala:159:18] wire [2:0] in_0_a_bits_param = anonIn_a_bits_param; // @[Xbar.scala:159:18] wire [3:0] in_0_a_bits_size = anonIn_a_bits_size; // @[Xbar.scala:159:18] wire [6:0] _in_0_a_bits_source_T = anonIn_a_bits_source; // @[Xbar.scala:166:55] wire [31:0] in_0_a_bits_address = anonIn_a_bits_address; // @[Xbar.scala:159:18] wire [7:0] in_0_a_bits_mask = anonIn_a_bits_mask; // @[Xbar.scala:159:18] wire [63:0] in_0_a_bits_data = anonIn_a_bits_data; // @[Xbar.scala:159:18] wire in_0_a_bits_corrupt = anonIn_a_bits_corrupt; // @[Xbar.scala:159:18] wire in_0_d_ready = anonIn_d_ready; // @[Xbar.scala:159:18] wire in_0_d_valid; // @[Xbar.scala:159:18] assign auto_anon_in_d_valid_0 = anonIn_d_valid; // @[Xbar.scala:74:9] wire [2:0] in_0_d_bits_opcode; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_opcode_0 = anonIn_d_bits_opcode; // @[Xbar.scala:74:9] wire [1:0] in_0_d_bits_param; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_param_0 = anonIn_d_bits_param; // @[Xbar.scala:74:9] wire [3:0] in_0_d_bits_size; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_size_0 = anonIn_d_bits_size; // @[Xbar.scala:74:9] wire [6:0] _anonIn_d_bits_source_T; // @[Xbar.scala:156:69] assign auto_anon_in_d_bits_source_0 = anonIn_d_bits_source; // @[Xbar.scala:74:9] wire in_0_d_bits_sink; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_sink_0 = anonIn_d_bits_sink; // @[Xbar.scala:74:9] wire in_0_d_bits_denied; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_denied_0 = anonIn_d_bits_denied; // @[Xbar.scala:74:9] wire [63:0] in_0_d_bits_data; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_data_0 = anonIn_d_bits_data; // @[Xbar.scala:74:9] wire in_0_d_bits_corrupt; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_corrupt_0 = anonIn_d_bits_corrupt; // @[Xbar.scala:74:9] wire out_0_a_ready = anonOut_a_ready; // @[Xbar.scala:216:19] wire out_0_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_valid_0 = anonOut_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_0_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_opcode_0 = anonOut_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_0_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_param_0 = anonOut_a_bits_param; // @[Xbar.scala:74:9] wire [3:0] out_0_a_bits_size; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_size_0 = anonOut_a_bits_size; // @[Xbar.scala:74:9] wire [6:0] out_0_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_source_0 = anonOut_a_bits_source; // @[Xbar.scala:74:9] assign auto_anon_out_0_a_bits_address_0 = anonOut_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_0_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_mask_0 = anonOut_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_0_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_data_0 = anonOut_a_bits_data; // @[Xbar.scala:74:9] wire out_0_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_corrupt_0 = anonOut_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_0_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_0_d_ready_0 = anonOut_d_ready; // @[Xbar.scala:74:9] wire out_0_d_valid = anonOut_d_valid; // 
@[Xbar.scala:216:19] wire [2:0] out_0_d_bits_opcode = anonOut_d_bits_opcode; // @[Xbar.scala:216:19] wire [1:0] out_0_d_bits_param = anonOut_d_bits_param; // @[Xbar.scala:216:19] wire [3:0] out_0_d_bits_size = anonOut_d_bits_size; // @[Xbar.scala:216:19] wire [6:0] out_0_d_bits_source = anonOut_d_bits_source; // @[Xbar.scala:216:19] wire _out_0_d_bits_sink_T = anonOut_d_bits_sink; // @[Xbar.scala:251:53] wire out_0_d_bits_denied = anonOut_d_bits_denied; // @[Xbar.scala:216:19] wire [63:0] out_0_d_bits_data = anonOut_d_bits_data; // @[Xbar.scala:216:19] wire out_0_d_bits_corrupt = anonOut_d_bits_corrupt; // @[Xbar.scala:216:19] wire out_1_a_ready = x1_anonOut_a_ready; // @[Xbar.scala:216:19] wire out_1_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_valid_0 = x1_anonOut_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_1_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_opcode_0 = x1_anonOut_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_1_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_param_0 = x1_anonOut_a_bits_param; // @[Xbar.scala:74:9] assign auto_anon_out_1_a_bits_size_0 = x1_anonOut_a_bits_size; // @[Xbar.scala:74:9] wire [6:0] out_1_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_source_0 = x1_anonOut_a_bits_source; // @[Xbar.scala:74:9] assign auto_anon_out_1_a_bits_address_0 = x1_anonOut_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_1_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_mask_0 = x1_anonOut_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_1_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_data_0 = x1_anonOut_a_bits_data; // @[Xbar.scala:74:9] wire out_1_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_corrupt_0 = x1_anonOut_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_1_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_1_d_ready_0 = x1_anonOut_d_ready; // @[Xbar.scala:74:9] wire out_1_d_valid = x1_anonOut_d_valid; // @[Xbar.scala:216:19] wire [2:0] out_1_d_bits_opcode = x1_anonOut_d_bits_opcode; // @[Xbar.scala:216:19] wire [1:0] out_1_d_bits_param = x1_anonOut_d_bits_param; // @[Xbar.scala:216:19] wire [6:0] out_1_d_bits_source = x1_anonOut_d_bits_source; // @[Xbar.scala:216:19] wire _out_1_d_bits_sink_T = x1_anonOut_d_bits_sink; // @[Xbar.scala:251:53] wire out_1_d_bits_denied = x1_anonOut_d_bits_denied; // @[Xbar.scala:216:19] wire [63:0] out_1_d_bits_data = x1_anonOut_d_bits_data; // @[Xbar.scala:216:19] wire out_1_d_bits_corrupt = x1_anonOut_d_bits_corrupt; // @[Xbar.scala:216:19] wire out_2_a_ready = x1_anonOut_1_a_ready; // @[Xbar.scala:216:19] wire out_2_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_valid_0 = x1_anonOut_1_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_2_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_opcode_0 = x1_anonOut_1_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_2_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_param_0 = x1_anonOut_1_a_bits_param; // @[Xbar.scala:74:9] assign auto_anon_out_2_a_bits_size_0 = x1_anonOut_1_a_bits_size; // @[Xbar.scala:74:9] wire [6:0] out_2_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_source_0 = x1_anonOut_1_a_bits_source; // @[Xbar.scala:74:9] assign auto_anon_out_2_a_bits_address_0 = x1_anonOut_1_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_2_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_mask_0 = x1_anonOut_1_a_bits_mask; // @[Xbar.scala:74:9] wire 
[63:0] out_2_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_data_0 = x1_anonOut_1_a_bits_data; // @[Xbar.scala:74:9] wire out_2_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_corrupt_0 = x1_anonOut_1_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_2_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_2_d_ready_0 = x1_anonOut_1_d_ready; // @[Xbar.scala:74:9] wire out_2_d_valid = x1_anonOut_1_d_valid; // @[Xbar.scala:216:19] wire [2:0] out_2_d_bits_opcode = x1_anonOut_1_d_bits_opcode; // @[Xbar.scala:216:19] wire [6:0] out_2_d_bits_source = x1_anonOut_1_d_bits_source; // @[Xbar.scala:216:19] wire [63:0] out_2_d_bits_data = x1_anonOut_1_d_bits_data; // @[Xbar.scala:216:19] wire out_3_a_ready = x1_anonOut_2_a_ready; // @[Xbar.scala:216:19] wire out_3_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_3_a_valid_0 = x1_anonOut_2_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_3_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_3_a_bits_opcode_0 = x1_anonOut_2_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_3_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_3_a_bits_param_0 = x1_anonOut_2_a_bits_param; // @[Xbar.scala:74:9] assign auto_anon_out_3_a_bits_size_0 = x1_anonOut_2_a_bits_size; // @[Xbar.scala:74:9] wire [6:0] out_3_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_3_a_bits_source_0 = x1_anonOut_2_a_bits_source; // @[Xbar.scala:74:9] assign auto_anon_out_3_a_bits_address_0 = x1_anonOut_2_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_3_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_3_a_bits_mask_0 = x1_anonOut_2_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_3_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_3_a_bits_data_0 = x1_anonOut_2_a_bits_data; // @[Xbar.scala:74:9] wire out_3_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_3_a_bits_corrupt_0 = x1_anonOut_2_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_3_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_3_d_ready_0 = x1_anonOut_2_d_ready; // @[Xbar.scala:74:9] wire out_3_d_valid = x1_anonOut_2_d_valid; // @[Xbar.scala:216:19] wire [2:0] out_3_d_bits_opcode = x1_anonOut_2_d_bits_opcode; // @[Xbar.scala:216:19] wire [6:0] out_3_d_bits_source = x1_anonOut_2_d_bits_source; // @[Xbar.scala:216:19] wire [63:0] out_3_d_bits_data = x1_anonOut_2_d_bits_data; // @[Xbar.scala:216:19] wire out_4_a_ready = x1_anonOut_3_a_ready; // @[Xbar.scala:216:19] wire out_4_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_4_a_valid_0 = x1_anonOut_3_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_4_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_4_a_bits_opcode_0 = x1_anonOut_3_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_4_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_4_a_bits_param_0 = x1_anonOut_3_a_bits_param; // @[Xbar.scala:74:9] assign auto_anon_out_4_a_bits_size_0 = x1_anonOut_3_a_bits_size; // @[Xbar.scala:74:9] wire [6:0] out_4_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_4_a_bits_source_0 = x1_anonOut_3_a_bits_source; // @[Xbar.scala:74:9] assign auto_anon_out_4_a_bits_address_0 = x1_anonOut_3_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_4_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_4_a_bits_mask_0 = x1_anonOut_3_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_4_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_4_a_bits_data_0 = x1_anonOut_3_a_bits_data; // @[Xbar.scala:74:9] wire out_4_a_bits_corrupt; // @[Xbar.scala:216:19] assign 
auto_anon_out_4_a_bits_corrupt_0 = x1_anonOut_3_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_4_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_4_d_ready_0 = x1_anonOut_3_d_ready; // @[Xbar.scala:74:9] wire out_4_d_valid = x1_anonOut_3_d_valid; // @[Xbar.scala:216:19] wire [2:0] out_4_d_bits_opcode = x1_anonOut_3_d_bits_opcode; // @[Xbar.scala:216:19] wire [6:0] out_4_d_bits_source = x1_anonOut_3_d_bits_source; // @[Xbar.scala:216:19] wire [63:0] out_4_d_bits_data = x1_anonOut_3_d_bits_data; // @[Xbar.scala:216:19] wire out_5_a_ready = x1_anonOut_4_a_ready; // @[Xbar.scala:216:19] wire out_5_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_5_a_valid_0 = x1_anonOut_4_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_5_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_5_a_bits_opcode_0 = x1_anonOut_4_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_5_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_5_a_bits_param_0 = x1_anonOut_4_a_bits_param; // @[Xbar.scala:74:9] assign auto_anon_out_5_a_bits_size_0 = x1_anonOut_4_a_bits_size; // @[Xbar.scala:74:9] wire [6:0] out_5_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_5_a_bits_source_0 = x1_anonOut_4_a_bits_source; // @[Xbar.scala:74:9] wire [31:0] out_5_a_bits_address; // @[Xbar.scala:216:19] assign auto_anon_out_5_a_bits_address_0 = x1_anonOut_4_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_5_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_5_a_bits_mask_0 = x1_anonOut_4_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_5_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_5_a_bits_data_0 = x1_anonOut_4_a_bits_data; // @[Xbar.scala:74:9] wire out_5_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_5_a_bits_corrupt_0 = x1_anonOut_4_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_5_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_5_d_ready_0 = x1_anonOut_4_d_ready; // @[Xbar.scala:74:9] wire out_5_d_valid = x1_anonOut_4_d_valid; // @[Xbar.scala:216:19] wire [2:0] out_5_d_bits_opcode = x1_anonOut_4_d_bits_opcode; // @[Xbar.scala:216:19] wire [1:0] out_5_d_bits_param = x1_anonOut_4_d_bits_param; // @[Xbar.scala:216:19] wire [6:0] out_5_d_bits_source = x1_anonOut_4_d_bits_source; // @[Xbar.scala:216:19] wire _out_5_d_bits_sink_T = x1_anonOut_4_d_bits_sink; // @[Xbar.scala:251:53] wire out_5_d_bits_denied = x1_anonOut_4_d_bits_denied; // @[Xbar.scala:216:19] wire [63:0] out_5_d_bits_data = x1_anonOut_4_d_bits_data; // @[Xbar.scala:216:19] wire out_5_d_bits_corrupt = x1_anonOut_4_d_bits_corrupt; // @[Xbar.scala:216:19] wire out_6_a_ready = x1_anonOut_5_a_ready; // @[Xbar.scala:216:19] wire out_6_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_6_a_valid_0 = x1_anonOut_5_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_6_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_6_a_bits_opcode_0 = x1_anonOut_5_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_6_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_6_a_bits_param_0 = x1_anonOut_5_a_bits_param; // @[Xbar.scala:74:9] assign auto_anon_out_6_a_bits_size_0 = x1_anonOut_5_a_bits_size; // @[Xbar.scala:74:9] wire [6:0] out_6_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_6_a_bits_source_0 = x1_anonOut_5_a_bits_source; // @[Xbar.scala:74:9] assign auto_anon_out_6_a_bits_address_0 = x1_anonOut_5_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_6_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_6_a_bits_mask_0 = x1_anonOut_5_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] 
out_6_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_6_a_bits_data_0 = x1_anonOut_5_a_bits_data; // @[Xbar.scala:74:9] wire out_6_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_6_a_bits_corrupt_0 = x1_anonOut_5_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_6_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_6_d_ready_0 = x1_anonOut_5_d_ready; // @[Xbar.scala:74:9] wire out_6_d_valid = x1_anonOut_5_d_valid; // @[Xbar.scala:216:19] wire [6:0] out_6_d_bits_source = x1_anonOut_5_d_bits_source; // @[Xbar.scala:216:19] wire [63:0] out_6_d_bits_data = x1_anonOut_5_d_bits_data; // @[Xbar.scala:216:19] wire out_7_a_ready = x1_anonOut_6_a_ready; // @[Xbar.scala:216:19] wire out_7_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_7_a_valid_0 = x1_anonOut_6_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_7_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_7_a_bits_opcode_0 = x1_anonOut_6_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_7_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_7_a_bits_param_0 = x1_anonOut_6_a_bits_param; // @[Xbar.scala:74:9] assign auto_anon_out_7_a_bits_size_0 = x1_anonOut_6_a_bits_size; // @[Xbar.scala:74:9] wire [6:0] out_7_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_7_a_bits_source_0 = x1_anonOut_6_a_bits_source; // @[Xbar.scala:74:9] assign auto_anon_out_7_a_bits_address_0 = x1_anonOut_6_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_7_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_7_a_bits_mask_0 = x1_anonOut_6_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_7_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_7_a_bits_data_0 = x1_anonOut_6_a_bits_data; // @[Xbar.scala:74:9] wire out_7_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_7_a_bits_corrupt_0 = x1_anonOut_6_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_7_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_7_d_ready_0 = x1_anonOut_6_d_ready; // @[Xbar.scala:74:9] wire out_7_d_valid = x1_anonOut_6_d_valid; // @[Xbar.scala:216:19] wire [2:0] out_7_d_bits_opcode = x1_anonOut_6_d_bits_opcode; // @[Xbar.scala:216:19] wire [1:0] out_7_d_bits_param = x1_anonOut_6_d_bits_param; // @[Xbar.scala:216:19] wire [6:0] out_7_d_bits_source = x1_anonOut_6_d_bits_source; // @[Xbar.scala:216:19] wire _out_7_d_bits_sink_T = x1_anonOut_6_d_bits_sink; // @[Xbar.scala:251:53] wire out_7_d_bits_denied = x1_anonOut_6_d_bits_denied; // @[Xbar.scala:216:19] wire [63:0] out_7_d_bits_data = x1_anonOut_6_d_bits_data; // @[Xbar.scala:216:19] wire out_7_d_bits_corrupt = x1_anonOut_6_d_bits_corrupt; // @[Xbar.scala:216:19] wire _portsAOI_in_0_a_ready_WIRE; // @[Mux.scala:30:73] assign anonIn_a_ready = in_0_a_ready; // @[Xbar.scala:159:18] wire [2:0] portsAOI_filtered_0_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_1_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_2_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_3_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_4_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_5_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_6_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_7_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] 
portsAOI_filtered_0_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_1_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_2_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_3_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_4_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_5_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_6_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_7_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [3:0] portsAOI_filtered_0_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [3:0] portsAOI_filtered_1_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [3:0] portsAOI_filtered_2_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [3:0] portsAOI_filtered_3_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [3:0] portsAOI_filtered_4_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [3:0] portsAOI_filtered_5_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [3:0] portsAOI_filtered_6_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [3:0] portsAOI_filtered_7_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [6:0] portsAOI_filtered_0_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [6:0] portsAOI_filtered_1_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [6:0] portsAOI_filtered_2_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [6:0] portsAOI_filtered_3_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [6:0] portsAOI_filtered_4_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [6:0] portsAOI_filtered_5_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [6:0] portsAOI_filtered_6_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [6:0] portsAOI_filtered_7_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [31:0] _requestAIO_T_26 = in_0_a_bits_address; // @[Xbar.scala:159:18] wire [31:0] portsAOI_filtered_0_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [31:0] portsAOI_filtered_1_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [31:0] portsAOI_filtered_2_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [31:0] portsAOI_filtered_3_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [31:0] portsAOI_filtered_4_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [31:0] portsAOI_filtered_5_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [31:0] portsAOI_filtered_6_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [31:0] portsAOI_filtered_7_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_0_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_1_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_2_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_3_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] 
wire [7:0] portsAOI_filtered_4_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_5_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_6_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_7_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_0_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_1_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_2_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_3_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_4_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_5_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_6_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_7_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_0_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_1_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_2_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_3_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_4_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_5_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_6_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_7_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire _in_0_d_valid_T_22; // @[Arbiter.scala:96:24] assign anonIn_d_valid = in_0_d_valid; // @[Xbar.scala:159:18] wire [2:0] _in_0_d_bits_WIRE_opcode; // @[Mux.scala:30:73] assign anonIn_d_bits_opcode = in_0_d_bits_opcode; // @[Xbar.scala:159:18] wire [1:0] _in_0_d_bits_WIRE_param; // @[Mux.scala:30:73] assign anonIn_d_bits_param = in_0_d_bits_param; // @[Xbar.scala:159:18] wire [3:0] _in_0_d_bits_WIRE_size; // @[Mux.scala:30:73] assign anonIn_d_bits_size = in_0_d_bits_size; // @[Xbar.scala:159:18] wire [6:0] _in_0_d_bits_WIRE_source; // @[Mux.scala:30:73] assign _anonIn_d_bits_source_T = in_0_d_bits_source; // @[Xbar.scala:156:69, :159:18] wire _in_0_d_bits_WIRE_sink; // @[Mux.scala:30:73] assign anonIn_d_bits_sink = in_0_d_bits_sink; // @[Xbar.scala:159:18] wire _in_0_d_bits_WIRE_denied; // @[Mux.scala:30:73] assign anonIn_d_bits_denied = in_0_d_bits_denied; // @[Xbar.scala:159:18] wire [63:0] _in_0_d_bits_WIRE_data; // @[Mux.scala:30:73] assign anonIn_d_bits_data = in_0_d_bits_data; // @[Xbar.scala:159:18] wire _in_0_d_bits_WIRE_corrupt; // @[Mux.scala:30:73] assign anonIn_d_bits_corrupt = in_0_d_bits_corrupt; // @[Xbar.scala:159:18] assign in_0_a_bits_source = _in_0_a_bits_source_T; // @[Xbar.scala:159:18, :166:55] assign anonIn_d_bits_source = _anonIn_d_bits_source_T; // @[Xbar.scala:156:69] wire portsAOI_filtered_0_ready = out_0_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_0_valid; // @[Xbar.scala:352:24] assign anonOut_a_valid = out_0_a_valid; // @[Xbar.scala:216:19] assign anonOut_a_bits_opcode = out_0_a_bits_opcode; // @[Xbar.scala:216:19] assign anonOut_a_bits_param = out_0_a_bits_param; // 
@[Xbar.scala:216:19] assign anonOut_a_bits_size = out_0_a_bits_size; // @[Xbar.scala:216:19] assign anonOut_a_bits_source = out_0_a_bits_source; // @[Xbar.scala:216:19] assign anonOut_a_bits_mask = out_0_a_bits_mask; // @[Xbar.scala:216:19] assign anonOut_a_bits_data = out_0_a_bits_data; // @[Xbar.scala:216:19] assign anonOut_a_bits_corrupt = out_0_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_0_ready; // @[Xbar.scala:352:24] assign anonOut_d_ready = out_0_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_1 = out_0_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_0_bits_opcode = out_0_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [1:0] portsDIO_filtered_0_bits_param = out_0_d_bits_param; // @[Xbar.scala:216:19, :352:24] wire [3:0] portsDIO_filtered_0_bits_size = out_0_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [6:0] _requestDOI_uncommonBits_T = out_0_d_bits_source; // @[Xbar.scala:216:19] wire [6:0] portsDIO_filtered_0_bits_source = out_0_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_0_bits_sink = out_0_d_bits_sink; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_0_bits_denied = out_0_d_bits_denied; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_0_bits_data = out_0_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_0_bits_corrupt = out_0_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_1_ready = out_1_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_1_valid; // @[Xbar.scala:352:24] assign x1_anonOut_a_valid = out_1_a_valid; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_opcode = out_1_a_bits_opcode; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_param = out_1_a_bits_param; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_source = out_1_a_bits_source; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_mask = out_1_a_bits_mask; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_data = out_1_a_bits_data; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_corrupt = out_1_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_1_0_ready; // @[Xbar.scala:352:24] assign x1_anonOut_d_ready = out_1_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_3 = out_1_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_1_0_bits_opcode = out_1_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [1:0] portsDIO_filtered_1_0_bits_param = out_1_d_bits_param; // @[Xbar.scala:216:19, :352:24] wire [3:0] portsDIO_filtered_1_0_bits_size = out_1_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [6:0] _requestDOI_uncommonBits_T_1 = out_1_d_bits_source; // @[Xbar.scala:216:19] wire [6:0] portsDIO_filtered_1_0_bits_source = out_1_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_1_0_bits_sink = out_1_d_bits_sink; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_1_0_bits_denied = out_1_d_bits_denied; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_1_0_bits_data = out_1_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_1_0_bits_corrupt = out_1_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_2_ready = out_2_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_2_valid; // @[Xbar.scala:352:24] assign x1_anonOut_1_a_valid = out_2_a_valid; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_opcode = out_2_a_bits_opcode; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_param = out_2_a_bits_param; // 
@[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_source = out_2_a_bits_source; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_mask = out_2_a_bits_mask; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_data = out_2_a_bits_data; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_corrupt = out_2_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_2_0_ready; // @[Xbar.scala:352:24] assign x1_anonOut_1_d_ready = out_2_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_5 = out_2_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_2_0_bits_opcode = out_2_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [3:0] portsDIO_filtered_2_0_bits_size = out_2_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [6:0] _requestDOI_uncommonBits_T_2 = out_2_d_bits_source; // @[Xbar.scala:216:19] wire [6:0] portsDIO_filtered_2_0_bits_source = out_2_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_2_0_bits_data = out_2_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_3_ready = out_3_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_3_valid; // @[Xbar.scala:352:24] assign x1_anonOut_2_a_valid = out_3_a_valid; // @[Xbar.scala:216:19] assign x1_anonOut_2_a_bits_opcode = out_3_a_bits_opcode; // @[Xbar.scala:216:19] assign x1_anonOut_2_a_bits_param = out_3_a_bits_param; // @[Xbar.scala:216:19] assign x1_anonOut_2_a_bits_source = out_3_a_bits_source; // @[Xbar.scala:216:19] assign x1_anonOut_2_a_bits_mask = out_3_a_bits_mask; // @[Xbar.scala:216:19] assign x1_anonOut_2_a_bits_data = out_3_a_bits_data; // @[Xbar.scala:216:19] assign x1_anonOut_2_a_bits_corrupt = out_3_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_3_0_ready; // @[Xbar.scala:352:24] assign x1_anonOut_2_d_ready = out_3_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_7 = out_3_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_3_0_bits_opcode = out_3_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [3:0] portsDIO_filtered_3_0_bits_size = out_3_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [6:0] _requestDOI_uncommonBits_T_3 = out_3_d_bits_source; // @[Xbar.scala:216:19] wire [6:0] portsDIO_filtered_3_0_bits_source = out_3_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_3_0_bits_data = out_3_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_4_ready = out_4_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_4_valid; // @[Xbar.scala:352:24] assign x1_anonOut_3_a_valid = out_4_a_valid; // @[Xbar.scala:216:19] assign x1_anonOut_3_a_bits_opcode = out_4_a_bits_opcode; // @[Xbar.scala:216:19] assign x1_anonOut_3_a_bits_param = out_4_a_bits_param; // @[Xbar.scala:216:19] assign x1_anonOut_3_a_bits_source = out_4_a_bits_source; // @[Xbar.scala:216:19] assign x1_anonOut_3_a_bits_mask = out_4_a_bits_mask; // @[Xbar.scala:216:19] assign x1_anonOut_3_a_bits_data = out_4_a_bits_data; // @[Xbar.scala:216:19] assign x1_anonOut_3_a_bits_corrupt = out_4_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_4_0_ready; // @[Xbar.scala:352:24] assign x1_anonOut_3_d_ready = out_4_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_9 = out_4_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_4_0_bits_opcode = out_4_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [3:0] portsDIO_filtered_4_0_bits_size = out_4_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [6:0] _requestDOI_uncommonBits_T_4 = 
out_4_d_bits_source; // @[Xbar.scala:216:19] wire [6:0] portsDIO_filtered_4_0_bits_source = out_4_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_4_0_bits_data = out_4_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_5_ready = out_5_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_5_valid; // @[Xbar.scala:352:24] assign x1_anonOut_4_a_valid = out_5_a_valid; // @[Xbar.scala:216:19] assign x1_anonOut_4_a_bits_opcode = out_5_a_bits_opcode; // @[Xbar.scala:216:19] assign x1_anonOut_4_a_bits_param = out_5_a_bits_param; // @[Xbar.scala:216:19] assign x1_anonOut_4_a_bits_source = out_5_a_bits_source; // @[Xbar.scala:216:19] assign x1_anonOut_4_a_bits_address = out_5_a_bits_address; // @[Xbar.scala:216:19] assign x1_anonOut_4_a_bits_mask = out_5_a_bits_mask; // @[Xbar.scala:216:19] assign x1_anonOut_4_a_bits_data = out_5_a_bits_data; // @[Xbar.scala:216:19] assign x1_anonOut_4_a_bits_corrupt = out_5_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_5_0_ready; // @[Xbar.scala:352:24] assign x1_anonOut_4_d_ready = out_5_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_11 = out_5_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_5_0_bits_opcode = out_5_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [1:0] portsDIO_filtered_5_0_bits_param = out_5_d_bits_param; // @[Xbar.scala:216:19, :352:24] wire [3:0] portsDIO_filtered_5_0_bits_size = out_5_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [6:0] _requestDOI_uncommonBits_T_5 = out_5_d_bits_source; // @[Xbar.scala:216:19] wire [6:0] portsDIO_filtered_5_0_bits_source = out_5_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_5_0_bits_sink = out_5_d_bits_sink; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_5_0_bits_denied = out_5_d_bits_denied; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_5_0_bits_data = out_5_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_5_0_bits_corrupt = out_5_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_6_ready = out_6_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_6_valid; // @[Xbar.scala:352:24] assign x1_anonOut_5_a_valid = out_6_a_valid; // @[Xbar.scala:216:19] assign x1_anonOut_5_a_bits_opcode = out_6_a_bits_opcode; // @[Xbar.scala:216:19] assign x1_anonOut_5_a_bits_param = out_6_a_bits_param; // @[Xbar.scala:216:19] assign x1_anonOut_5_a_bits_source = out_6_a_bits_source; // @[Xbar.scala:216:19] assign x1_anonOut_5_a_bits_mask = out_6_a_bits_mask; // @[Xbar.scala:216:19] assign x1_anonOut_5_a_bits_data = out_6_a_bits_data; // @[Xbar.scala:216:19] assign x1_anonOut_5_a_bits_corrupt = out_6_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_6_0_ready; // @[Xbar.scala:352:24] assign x1_anonOut_5_d_ready = out_6_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_13 = out_6_d_valid; // @[Xbar.scala:216:19, :355:40] wire [3:0] portsDIO_filtered_6_0_bits_size = out_6_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [6:0] _requestDOI_uncommonBits_T_6 = out_6_d_bits_source; // @[Xbar.scala:216:19] wire [6:0] portsDIO_filtered_6_0_bits_source = out_6_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_6_0_bits_data = out_6_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_7_ready = out_7_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_7_valid; // @[Xbar.scala:352:24] assign x1_anonOut_6_a_valid = out_7_a_valid; 
// @[Xbar.scala:216:19] assign x1_anonOut_6_a_bits_opcode = out_7_a_bits_opcode; // @[Xbar.scala:216:19] assign x1_anonOut_6_a_bits_param = out_7_a_bits_param; // @[Xbar.scala:216:19] assign x1_anonOut_6_a_bits_source = out_7_a_bits_source; // @[Xbar.scala:216:19] assign x1_anonOut_6_a_bits_mask = out_7_a_bits_mask; // @[Xbar.scala:216:19] assign x1_anonOut_6_a_bits_data = out_7_a_bits_data; // @[Xbar.scala:216:19] assign x1_anonOut_6_a_bits_corrupt = out_7_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_7_0_ready; // @[Xbar.scala:352:24] assign x1_anonOut_6_d_ready = out_7_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_15 = out_7_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_7_0_bits_opcode = out_7_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [1:0] portsDIO_filtered_7_0_bits_param = out_7_d_bits_param; // @[Xbar.scala:216:19, :352:24] wire [3:0] portsDIO_filtered_7_0_bits_size = out_7_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [6:0] _requestDOI_uncommonBits_T_7 = out_7_d_bits_source; // @[Xbar.scala:216:19] wire [6:0] portsDIO_filtered_7_0_bits_source = out_7_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_7_0_bits_sink = out_7_d_bits_sink; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_7_0_bits_denied = out_7_d_bits_denied; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_7_0_bits_data = out_7_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire [31:0] out_0_a_bits_address; // @[Xbar.scala:216:19] wire portsDIO_filtered_7_0_bits_corrupt = out_7_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire [3:0] out_1_a_bits_size; // @[Xbar.scala:216:19] wire [31:0] out_1_a_bits_address; // @[Xbar.scala:216:19] wire [3:0] out_2_a_bits_size; // @[Xbar.scala:216:19] wire [31:0] out_2_a_bits_address; // @[Xbar.scala:216:19] wire [3:0] out_3_a_bits_size; // @[Xbar.scala:216:19] wire [31:0] out_3_a_bits_address; // @[Xbar.scala:216:19] wire [3:0] out_4_a_bits_size; // @[Xbar.scala:216:19] wire [31:0] out_4_a_bits_address; // @[Xbar.scala:216:19] wire [3:0] out_5_a_bits_size; // @[Xbar.scala:216:19] wire [3:0] out_6_a_bits_size; // @[Xbar.scala:216:19] wire [31:0] out_6_a_bits_address; // @[Xbar.scala:216:19] wire [3:0] out_7_a_bits_size; // @[Xbar.scala:216:19] wire [31:0] out_7_a_bits_address; // @[Xbar.scala:216:19] assign anonOut_a_bits_address = out_0_a_bits_address[13:0]; // @[Xbar.scala:216:19, :222:41] assign out_0_d_bits_sink = _out_0_d_bits_sink_T; // @[Xbar.scala:216:19, :251:53] assign x1_anonOut_a_bits_address = out_1_a_bits_address[28:0]; // @[Xbar.scala:216:19, :222:41] assign x1_anonOut_a_bits_size = out_1_a_bits_size[2:0]; // @[Xbar.scala:216:19, :222:41] assign out_1_d_bits_size = {1'h0, x1_anonOut_d_bits_size}; // @[Xbar.scala:216:19, :250:29] assign out_1_d_bits_sink = _out_1_d_bits_sink_T; // @[Xbar.scala:216:19, :251:53] assign x1_anonOut_1_a_bits_address = out_2_a_bits_address[25:0]; // @[Xbar.scala:216:19, :222:41] assign x1_anonOut_1_a_bits_size = out_2_a_bits_size[2:0]; // @[Xbar.scala:216:19, :222:41] assign out_2_d_bits_size = {1'h0, x1_anonOut_1_d_bits_size}; // @[Xbar.scala:216:19, :250:29] assign x1_anonOut_2_a_bits_address = out_3_a_bits_address[27:0]; // @[Xbar.scala:216:19, :222:41] assign x1_anonOut_2_a_bits_size = out_3_a_bits_size[2:0]; // @[Xbar.scala:216:19, :222:41] assign out_3_d_bits_size = {1'h0, x1_anonOut_2_d_bits_size}; // @[Xbar.scala:216:19, :250:29] assign x1_anonOut_3_a_bits_address = out_4_a_bits_address[11:0]; // @[Xbar.scala:216:19, 
:222:41] assign x1_anonOut_3_a_bits_size = out_4_a_bits_size[2:0]; // @[Xbar.scala:216:19, :222:41] assign out_4_d_bits_size = {1'h0, x1_anonOut_3_d_bits_size}; // @[Xbar.scala:216:19, :250:29] assign x1_anonOut_4_a_bits_size = out_5_a_bits_size[2:0]; // @[Xbar.scala:216:19, :222:41] assign out_5_d_bits_size = {1'h0, x1_anonOut_4_d_bits_size}; // @[Xbar.scala:216:19, :250:29] assign out_5_d_bits_sink = _out_5_d_bits_sink_T; // @[Xbar.scala:216:19, :251:53] assign x1_anonOut_5_a_bits_address = out_6_a_bits_address[16:0]; // @[Xbar.scala:216:19, :222:41] assign x1_anonOut_5_a_bits_size = out_6_a_bits_size[2:0]; // @[Xbar.scala:216:19, :222:41] assign out_6_d_bits_size = {1'h0, x1_anonOut_5_d_bits_size}; // @[Xbar.scala:216:19, :250:29] assign x1_anonOut_6_a_bits_address = out_7_a_bits_address[20:0]; // @[Xbar.scala:216:19, :222:41] assign x1_anonOut_6_a_bits_size = out_7_a_bits_size[2:0]; // @[Xbar.scala:216:19, :222:41] assign out_7_d_bits_size = {1'h0, x1_anonOut_6_d_bits_size}; // @[Xbar.scala:216:19, :250:29] assign out_7_d_bits_sink = _out_7_d_bits_sink_T; // @[Xbar.scala:216:19, :251:53] wire [31:0] _requestAIO_T = {in_0_a_bits_address[31:14], in_0_a_bits_address[13:0] ^ 14'h3000}; // @[Xbar.scala:159:18] wire [32:0] _requestAIO_T_1 = {1'h0, _requestAIO_T}; // @[Parameters.scala:137:{31,41}] wire [32:0] _requestAIO_T_2 = _requestAIO_T_1 & 33'h9A113000; // @[Parameters.scala:137:{41,46}] wire [32:0] _requestAIO_T_3 = _requestAIO_T_2; // @[Parameters.scala:137:46] wire _requestAIO_T_4 = _requestAIO_T_3 == 33'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_0 = _requestAIO_T_4; // @[Xbar.scala:307:107] wire _portsAOI_filtered_0_valid_T = requestAIO_0_0; // @[Xbar.scala:307:107, :355:54] wire [31:0] _requestAIO_T_5 = {in_0_a_bits_address[31:13], in_0_a_bits_address[12:0] ^ 13'h1000}; // @[Xbar.scala:159:18] wire [32:0] _requestAIO_T_6 = {1'h0, _requestAIO_T_5}; // @[Parameters.scala:137:{31,41}] wire [32:0] _requestAIO_T_7 = _requestAIO_T_6 & 33'h9A113000; // @[Parameters.scala:137:{41,46}] wire [32:0] _requestAIO_T_8 = _requestAIO_T_7; // @[Parameters.scala:137:46] wire _requestAIO_T_9 = _requestAIO_T_8 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _requestAIO_T_10 = {in_0_a_bits_address[31:29], in_0_a_bits_address[28:0] ^ 29'h10000000}; // @[Xbar.scala:159:18] wire [32:0] _requestAIO_T_11 = {1'h0, _requestAIO_T_10}; // @[Parameters.scala:137:{31,41}] wire [32:0] _requestAIO_T_12 = _requestAIO_T_11 & 33'h9A113000; // @[Parameters.scala:137:{41,46}] wire [32:0] _requestAIO_T_13 = _requestAIO_T_12; // @[Parameters.scala:137:46] wire _requestAIO_T_14 = _requestAIO_T_13 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _requestAIO_T_15 = _requestAIO_T_9 | _requestAIO_T_14; // @[Xbar.scala:291:92] wire requestAIO_0_1 = _requestAIO_T_15; // @[Xbar.scala:291:92, :307:107] wire _portsAOI_filtered_1_valid_T = requestAIO_0_1; // @[Xbar.scala:307:107, :355:54] wire [31:0] _requestAIO_T_16 = {in_0_a_bits_address[31:26], in_0_a_bits_address[25:0] ^ 26'h2000000}; // @[Xbar.scala:159:18] wire [32:0] _requestAIO_T_17 = {1'h0, _requestAIO_T_16}; // @[Parameters.scala:137:{31,41}] wire [32:0] _requestAIO_T_18 = _requestAIO_T_17 & 33'h9A110000; // @[Parameters.scala:137:{41,46}] wire [32:0] _requestAIO_T_19 = _requestAIO_T_18; // @[Parameters.scala:137:46] wire _requestAIO_T_20 = _requestAIO_T_19 == 33'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_2 = _requestAIO_T_20; // @[Xbar.scala:307:107] wire _portsAOI_filtered_2_valid_T = requestAIO_0_2; // @[Xbar.scala:307:107, :355:54] 
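// A-channel address routing: each requestAIO_0_j (ports 0-2 above, ports 3-7 below) tests
// in_0_a_bits_address against output port j's address set with the
// ((address ^ base) & mask) == 0 idiom from Parameters.scala:137; ports covering several
// ranges OR the individual tests together (Xbar.scala:291). The resulting per-port selects
// gate portsAOI_filtered_j_valid and feed the one-hot ready mux for in_0_a_ready further
// below. The beatsAI / beatsDO terms that follow the decode appear to derive the burst
// length in beats (as a beats-minus-one count, Edges.scala:220) from the size field,
// zeroed for data-less opcodes.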
wire [31:0] _requestAIO_T_21 = {in_0_a_bits_address[31:28], in_0_a_bits_address[27:0] ^ 28'h8000000}; // @[Xbar.scala:159:18] wire [32:0] _requestAIO_T_22 = {1'h0, _requestAIO_T_21}; // @[Parameters.scala:137:{31,41}] wire [32:0] _requestAIO_T_23 = _requestAIO_T_22 & 33'h98000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _requestAIO_T_24 = _requestAIO_T_23; // @[Parameters.scala:137:46] wire _requestAIO_T_25 = _requestAIO_T_24 == 33'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_3 = _requestAIO_T_25; // @[Xbar.scala:307:107] wire _portsAOI_filtered_3_valid_T = requestAIO_0_3; // @[Xbar.scala:307:107, :355:54] wire [32:0] _requestAIO_T_27 = {1'h0, _requestAIO_T_26}; // @[Parameters.scala:137:{31,41}] wire [32:0] _requestAIO_T_28 = _requestAIO_T_27 & 33'h9A113000; // @[Parameters.scala:137:{41,46}] wire [32:0] _requestAIO_T_29 = _requestAIO_T_28; // @[Parameters.scala:137:46] wire _requestAIO_T_30 = _requestAIO_T_29 == 33'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_4 = _requestAIO_T_30; // @[Xbar.scala:307:107] wire _portsAOI_filtered_4_valid_T = requestAIO_0_4; // @[Xbar.scala:307:107, :355:54] wire [31:0] _requestAIO_T_31 = in_0_a_bits_address ^ 32'h80000000; // @[Xbar.scala:159:18] wire [32:0] _requestAIO_T_32 = {1'h0, _requestAIO_T_31}; // @[Parameters.scala:137:{31,41}] wire [32:0] _requestAIO_T_33 = _requestAIO_T_32 & 33'h9A100000; // @[Parameters.scala:137:{41,46}] wire [32:0] _requestAIO_T_34 = _requestAIO_T_33; // @[Parameters.scala:137:46] wire _requestAIO_T_35 = _requestAIO_T_34 == 33'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_5 = _requestAIO_T_35; // @[Xbar.scala:307:107] wire _portsAOI_filtered_5_valid_T = requestAIO_0_5; // @[Xbar.scala:307:107, :355:54] wire [31:0] _requestAIO_T_36 = {in_0_a_bits_address[31:17], in_0_a_bits_address[16:0] ^ 17'h10000}; // @[Xbar.scala:159:18] wire [32:0] _requestAIO_T_37 = {1'h0, _requestAIO_T_36}; // @[Parameters.scala:137:{31,41}] wire [32:0] _requestAIO_T_38 = _requestAIO_T_37 & 33'h9A110000; // @[Parameters.scala:137:{41,46}] wire [32:0] _requestAIO_T_39 = _requestAIO_T_38; // @[Parameters.scala:137:46] wire _requestAIO_T_40 = _requestAIO_T_39 == 33'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_6 = _requestAIO_T_40; // @[Xbar.scala:307:107] wire _portsAOI_filtered_6_valid_T = requestAIO_0_6; // @[Xbar.scala:307:107, :355:54] wire [31:0] _requestAIO_T_41 = {in_0_a_bits_address[31:21], in_0_a_bits_address[20:0] ^ 21'h100000}; // @[Xbar.scala:159:18] wire [32:0] _requestAIO_T_42 = {1'h0, _requestAIO_T_41}; // @[Parameters.scala:137:{31,41}] wire [32:0] _requestAIO_T_43 = _requestAIO_T_42 & 33'h9A103000; // @[Parameters.scala:137:{41,46}] wire [32:0] _requestAIO_T_44 = _requestAIO_T_43; // @[Parameters.scala:137:46] wire _requestAIO_T_45 = _requestAIO_T_44 == 33'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_7 = _requestAIO_T_45; // @[Xbar.scala:307:107] wire _portsAOI_filtered_7_valid_T = requestAIO_0_7; // @[Xbar.scala:307:107, :355:54] wire [6:0] requestDOI_uncommonBits = _requestDOI_uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire [6:0] requestDOI_uncommonBits_1 = _requestDOI_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire [6:0] requestDOI_uncommonBits_2 = _requestDOI_uncommonBits_T_2; // @[Parameters.scala:52:{29,56}] wire [6:0] requestDOI_uncommonBits_3 = _requestDOI_uncommonBits_T_3; // @[Parameters.scala:52:{29,56}] wire [6:0] requestDOI_uncommonBits_4 = _requestDOI_uncommonBits_T_4; // @[Parameters.scala:52:{29,56}] wire [6:0] requestDOI_uncommonBits_5 = 
_requestDOI_uncommonBits_T_5; // @[Parameters.scala:52:{29,56}] wire [6:0] requestDOI_uncommonBits_6 = _requestDOI_uncommonBits_T_6; // @[Parameters.scala:52:{29,56}] wire [6:0] requestDOI_uncommonBits_7 = _requestDOI_uncommonBits_T_7; // @[Parameters.scala:52:{29,56}] wire [26:0] _beatsAI_decode_T = 27'hFFF << in_0_a_bits_size; // @[package.scala:243:71] wire [11:0] _beatsAI_decode_T_1 = _beatsAI_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _beatsAI_decode_T_2 = ~_beatsAI_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] beatsAI_decode = _beatsAI_decode_T_2[11:3]; // @[package.scala:243:46] wire _beatsAI_opdata_T = in_0_a_bits_opcode[2]; // @[Xbar.scala:159:18] wire beatsAI_opdata = ~_beatsAI_opdata_T; // @[Edges.scala:92:{28,37}] wire [8:0] beatsAI_0 = beatsAI_opdata ? beatsAI_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] wire [26:0] _beatsDO_decode_T = 27'hFFF << out_0_d_bits_size; // @[package.scala:243:71] wire [11:0] _beatsDO_decode_T_1 = _beatsDO_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _beatsDO_decode_T_2 = ~_beatsDO_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] beatsDO_decode = _beatsDO_decode_T_2[11:3]; // @[package.scala:243:46] wire beatsDO_opdata = out_0_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [8:0] beatsDO_0 = beatsDO_opdata ? beatsDO_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire [20:0] _beatsDO_decode_T_3 = 21'h3F << out_1_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_4 = _beatsDO_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_5 = ~_beatsDO_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode_1 = _beatsDO_decode_T_5[5:3]; // @[package.scala:243:46] wire beatsDO_opdata_1 = out_1_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [2:0] beatsDO_1 = beatsDO_opdata_1 ? beatsDO_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire [20:0] _beatsDO_decode_T_6 = 21'h3F << out_2_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_7 = _beatsDO_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_8 = ~_beatsDO_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode_2 = _beatsDO_decode_T_8[5:3]; // @[package.scala:243:46] wire beatsDO_opdata_2 = out_2_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [2:0] beatsDO_2 = beatsDO_opdata_2 ? beatsDO_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire [20:0] _beatsDO_decode_T_9 = 21'h3F << out_3_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_10 = _beatsDO_decode_T_9[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_11 = ~_beatsDO_decode_T_10; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode_3 = _beatsDO_decode_T_11[5:3]; // @[package.scala:243:46] wire beatsDO_opdata_3 = out_3_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [2:0] beatsDO_3 = beatsDO_opdata_3 ? beatsDO_decode_3 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire [20:0] _beatsDO_decode_T_12 = 21'h3F << out_4_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_13 = _beatsDO_decode_T_12[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_14 = ~_beatsDO_decode_T_13; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode_4 = _beatsDO_decode_T_14[5:3]; // @[package.scala:243:46] wire beatsDO_opdata_4 = out_4_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [2:0] beatsDO_4 = beatsDO_opdata_4 ? 
beatsDO_decode_4 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire [20:0] _beatsDO_decode_T_15 = 21'h3F << out_5_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_16 = _beatsDO_decode_T_15[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_17 = ~_beatsDO_decode_T_16; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode_5 = _beatsDO_decode_T_17[5:3]; // @[package.scala:243:46] wire beatsDO_opdata_5 = out_5_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [2:0] beatsDO_5 = beatsDO_opdata_5 ? beatsDO_decode_5 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire [20:0] _beatsDO_decode_T_18 = 21'h3F << out_6_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_19 = _beatsDO_decode_T_18[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_20 = ~_beatsDO_decode_T_19; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode_6 = _beatsDO_decode_T_20[5:3]; // @[package.scala:243:46] wire [2:0] beatsDO_6 = beatsDO_decode_6; // @[Edges.scala:220:59, :221:14] wire [20:0] _beatsDO_decode_T_21 = 21'h3F << out_7_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_22 = _beatsDO_decode_T_21[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_23 = ~_beatsDO_decode_T_22; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode_7 = _beatsDO_decode_T_23[5:3]; // @[package.scala:243:46] wire beatsDO_opdata_7 = out_7_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [2:0] beatsDO_7 = beatsDO_opdata_7 ? beatsDO_decode_7 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire _portsAOI_filtered_0_valid_T_1; // @[Xbar.scala:355:40] assign out_0_a_valid = portsAOI_filtered_0_valid; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_opcode = portsAOI_filtered_0_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_param = portsAOI_filtered_0_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_size = portsAOI_filtered_0_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_source = portsAOI_filtered_0_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_address = portsAOI_filtered_0_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_mask = portsAOI_filtered_0_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_data = portsAOI_filtered_0_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_corrupt = portsAOI_filtered_0_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire _portsAOI_filtered_1_valid_T_1; // @[Xbar.scala:355:40] assign out_1_a_valid = portsAOI_filtered_1_valid; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_opcode = portsAOI_filtered_1_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_param = portsAOI_filtered_1_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_size = portsAOI_filtered_1_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_source = portsAOI_filtered_1_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_address = portsAOI_filtered_1_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_mask = portsAOI_filtered_1_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_data = portsAOI_filtered_1_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_corrupt = portsAOI_filtered_1_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire _portsAOI_filtered_2_valid_T_1; // @[Xbar.scala:355:40] assign out_2_a_valid = portsAOI_filtered_2_valid; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_opcode = 
portsAOI_filtered_2_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_param = portsAOI_filtered_2_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_size = portsAOI_filtered_2_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_source = portsAOI_filtered_2_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_address = portsAOI_filtered_2_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_mask = portsAOI_filtered_2_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_data = portsAOI_filtered_2_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_corrupt = portsAOI_filtered_2_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire _portsAOI_filtered_3_valid_T_1; // @[Xbar.scala:355:40] assign out_3_a_valid = portsAOI_filtered_3_valid; // @[Xbar.scala:216:19, :352:24] assign out_3_a_bits_opcode = portsAOI_filtered_3_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_3_a_bits_param = portsAOI_filtered_3_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_3_a_bits_size = portsAOI_filtered_3_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_3_a_bits_source = portsAOI_filtered_3_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_3_a_bits_address = portsAOI_filtered_3_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_3_a_bits_mask = portsAOI_filtered_3_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_3_a_bits_data = portsAOI_filtered_3_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_3_a_bits_corrupt = portsAOI_filtered_3_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire _portsAOI_filtered_4_valid_T_1; // @[Xbar.scala:355:40] assign out_4_a_valid = portsAOI_filtered_4_valid; // @[Xbar.scala:216:19, :352:24] assign out_4_a_bits_opcode = portsAOI_filtered_4_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_4_a_bits_param = portsAOI_filtered_4_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_4_a_bits_size = portsAOI_filtered_4_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_4_a_bits_source = portsAOI_filtered_4_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_4_a_bits_address = portsAOI_filtered_4_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_4_a_bits_mask = portsAOI_filtered_4_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_4_a_bits_data = portsAOI_filtered_4_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_4_a_bits_corrupt = portsAOI_filtered_4_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire _portsAOI_filtered_5_valid_T_1; // @[Xbar.scala:355:40] assign out_5_a_valid = portsAOI_filtered_5_valid; // @[Xbar.scala:216:19, :352:24] assign out_5_a_bits_opcode = portsAOI_filtered_5_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_5_a_bits_param = portsAOI_filtered_5_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_5_a_bits_size = portsAOI_filtered_5_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_5_a_bits_source = portsAOI_filtered_5_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_5_a_bits_address = portsAOI_filtered_5_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_5_a_bits_mask = portsAOI_filtered_5_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_5_a_bits_data = portsAOI_filtered_5_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_5_a_bits_corrupt = portsAOI_filtered_5_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire _portsAOI_filtered_6_valid_T_1; // @[Xbar.scala:355:40] assign out_6_a_valid = portsAOI_filtered_6_valid; // @[Xbar.scala:216:19, :352:24] assign 
out_6_a_bits_opcode = portsAOI_filtered_6_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_6_a_bits_param = portsAOI_filtered_6_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_6_a_bits_size = portsAOI_filtered_6_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_6_a_bits_source = portsAOI_filtered_6_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_6_a_bits_address = portsAOI_filtered_6_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_6_a_bits_mask = portsAOI_filtered_6_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_6_a_bits_data = portsAOI_filtered_6_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_6_a_bits_corrupt = portsAOI_filtered_6_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire _portsAOI_filtered_7_valid_T_1; // @[Xbar.scala:355:40] assign out_7_a_valid = portsAOI_filtered_7_valid; // @[Xbar.scala:216:19, :352:24] assign out_7_a_bits_opcode = portsAOI_filtered_7_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_7_a_bits_param = portsAOI_filtered_7_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_7_a_bits_size = portsAOI_filtered_7_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_7_a_bits_source = portsAOI_filtered_7_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_7_a_bits_address = portsAOI_filtered_7_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_7_a_bits_mask = portsAOI_filtered_7_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_7_a_bits_data = portsAOI_filtered_7_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_7_a_bits_corrupt = portsAOI_filtered_7_bits_corrupt; // @[Xbar.scala:216:19, :352:24] assign _portsAOI_filtered_0_valid_T_1 = in_0_a_valid & _portsAOI_filtered_0_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_0_valid = _portsAOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsAOI_filtered_1_valid_T_1 = in_0_a_valid & _portsAOI_filtered_1_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_1_valid = _portsAOI_filtered_1_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsAOI_filtered_2_valid_T_1 = in_0_a_valid & _portsAOI_filtered_2_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_2_valid = _portsAOI_filtered_2_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsAOI_filtered_3_valid_T_1 = in_0_a_valid & _portsAOI_filtered_3_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_3_valid = _portsAOI_filtered_3_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsAOI_filtered_4_valid_T_1 = in_0_a_valid & _portsAOI_filtered_4_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_4_valid = _portsAOI_filtered_4_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsAOI_filtered_5_valid_T_1 = in_0_a_valid & _portsAOI_filtered_5_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_5_valid = _portsAOI_filtered_5_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsAOI_filtered_6_valid_T_1 = in_0_a_valid & _portsAOI_filtered_6_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_6_valid = _portsAOI_filtered_6_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsAOI_filtered_7_valid_T_1 = in_0_a_valid & _portsAOI_filtered_7_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_7_valid = _portsAOI_filtered_7_valid_T_1; // @[Xbar.scala:352:24, :355:40] wire _portsAOI_in_0_a_ready_T = requestAIO_0_0 & portsAOI_filtered_0_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_1 = 
requestAIO_0_1 & portsAOI_filtered_1_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_2 = requestAIO_0_2 & portsAOI_filtered_2_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_3 = requestAIO_0_3 & portsAOI_filtered_3_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_4 = requestAIO_0_4 & portsAOI_filtered_4_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_5 = requestAIO_0_5 & portsAOI_filtered_5_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_6 = requestAIO_0_6 & portsAOI_filtered_6_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_7 = requestAIO_0_7 & portsAOI_filtered_7_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_8 = _portsAOI_in_0_a_ready_T | _portsAOI_in_0_a_ready_T_1; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_9 = _portsAOI_in_0_a_ready_T_8 | _portsAOI_in_0_a_ready_T_2; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_10 = _portsAOI_in_0_a_ready_T_9 | _portsAOI_in_0_a_ready_T_3; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_11 = _portsAOI_in_0_a_ready_T_10 | _portsAOI_in_0_a_ready_T_4; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_12 = _portsAOI_in_0_a_ready_T_11 | _portsAOI_in_0_a_ready_T_5; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_13 = _portsAOI_in_0_a_ready_T_12 | _portsAOI_in_0_a_ready_T_6; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_14 = _portsAOI_in_0_a_ready_T_13 | _portsAOI_in_0_a_ready_T_7; // @[Mux.scala:30:73] assign _portsAOI_in_0_a_ready_WIRE = _portsAOI_in_0_a_ready_T_14; // @[Mux.scala:30:73] assign in_0_a_ready = _portsAOI_in_0_a_ready_WIRE; // @[Mux.scala:30:73] wire _filtered_0_ready_T; // @[Arbiter.scala:94:31] assign out_0_d_ready = portsDIO_filtered_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_0_valid = _portsDIO_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_1; // @[Arbiter.scala:94:31] assign out_1_d_ready = portsDIO_filtered_1_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_1_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_1_0_valid = _portsDIO_filtered_0_valid_T_3; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_2; // @[Arbiter.scala:94:31] assign out_2_d_ready = portsDIO_filtered_2_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_2_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_2_0_valid = _portsDIO_filtered_0_valid_T_5; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_3; // @[Arbiter.scala:94:31] assign out_3_d_ready = portsDIO_filtered_3_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_3_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_3_0_valid = _portsDIO_filtered_0_valid_T_7; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_4; // @[Arbiter.scala:94:31] assign out_4_d_ready = portsDIO_filtered_4_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_4_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_4_0_valid = _portsDIO_filtered_0_valid_T_9; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_5; // @[Arbiter.scala:94:31] assign out_5_d_ready = portsDIO_filtered_5_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_5_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_5_0_valid = _portsDIO_filtered_0_valid_T_11; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_6; // @[Arbiter.scala:94:31] assign out_6_d_ready = portsDIO_filtered_6_0_ready; // @[Xbar.scala:216:19, :352:24] 
wire portsDIO_filtered_6_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_6_0_valid = _portsDIO_filtered_0_valid_T_13; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_7; // @[Arbiter.scala:94:31] assign out_7_d_ready = portsDIO_filtered_7_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_7_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_7_0_valid = _portsDIO_filtered_0_valid_T_15; // @[Xbar.scala:352:24, :355:40] reg [8:0] beatsLeft; // @[Arbiter.scala:60:30] wire idle = beatsLeft == 9'h0; // @[Arbiter.scala:60:30, :61:28] wire latch = idle & in_0_d_ready; // @[Xbar.scala:159:18] wire [1:0] readys_lo_lo = {portsDIO_filtered_1_0_valid, portsDIO_filtered_0_valid}; // @[Xbar.scala:352:24] wire [1:0] readys_lo_hi = {portsDIO_filtered_3_0_valid, portsDIO_filtered_2_0_valid}; // @[Xbar.scala:352:24] wire [3:0] readys_lo = {readys_lo_hi, readys_lo_lo}; // @[Arbiter.scala:68:51] wire [1:0] readys_hi_lo = {portsDIO_filtered_5_0_valid, portsDIO_filtered_4_0_valid}; // @[Xbar.scala:352:24] wire [1:0] readys_hi_hi = {portsDIO_filtered_7_0_valid, portsDIO_filtered_6_0_valid}; // @[Xbar.scala:352:24] wire [3:0] readys_hi = {readys_hi_hi, readys_hi_lo}; // @[Arbiter.scala:68:51] wire [7:0] _readys_T = {readys_hi, readys_lo}; // @[Arbiter.scala:68:51] wire [7:0] readys_valid = _readys_T; // @[Arbiter.scala:21:23, :68:51] wire _readys_T_1 = readys_valid == _readys_T; // @[Arbiter.scala:21:23, :22:19, :68:51] wire _readys_T_3 = ~_readys_T_2; // @[Arbiter.scala:22:12] wire _readys_T_4 = ~_readys_T_1; // @[Arbiter.scala:22:{12,19}] reg [7:0] readys_mask; // @[Arbiter.scala:23:23] wire [7:0] _readys_filter_T = ~readys_mask; // @[Arbiter.scala:23:23, :24:30] wire [7:0] _readys_filter_T_1 = readys_valid & _readys_filter_T; // @[Arbiter.scala:21:23, :24:{28,30}] wire [15:0] readys_filter = {_readys_filter_T_1, readys_valid}; // @[Arbiter.scala:21:23, :24:{21,28}] wire [14:0] _readys_unready_T = readys_filter[15:1]; // @[package.scala:262:48] wire [15:0] _readys_unready_T_1 = {readys_filter[15], readys_filter[14:0] | _readys_unready_T}; // @[package.scala:262:{43,48}] wire [13:0] _readys_unready_T_2 = _readys_unready_T_1[15:2]; // @[package.scala:262:{43,48}] wire [15:0] _readys_unready_T_3 = {_readys_unready_T_1[15:14], _readys_unready_T_1[13:0] | _readys_unready_T_2}; // @[package.scala:262:{43,48}] wire [11:0] _readys_unready_T_4 = _readys_unready_T_3[15:4]; // @[package.scala:262:{43,48}] wire [15:0] _readys_unready_T_5 = {_readys_unready_T_3[15:12], _readys_unready_T_3[11:0] | _readys_unready_T_4}; // @[package.scala:262:{43,48}] wire [15:0] _readys_unready_T_6 = _readys_unready_T_5; // @[package.scala:262:43, :263:17] wire [14:0] _readys_unready_T_7 = _readys_unready_T_6[15:1]; // @[package.scala:263:17] wire [15:0] _readys_unready_T_8 = {readys_mask, 8'h0}; // @[Arbiter.scala:23:23, :25:66] wire [15:0] readys_unready = {1'h0, _readys_unready_T_7} | _readys_unready_T_8; // @[Arbiter.scala:25:{52,58,66}] wire [7:0] _readys_readys_T = readys_unready[15:8]; // @[Arbiter.scala:25:58, :26:29] wire [7:0] _readys_readys_T_1 = readys_unready[7:0]; // @[Arbiter.scala:25:58, :26:48] wire [7:0] _readys_readys_T_2 = _readys_readys_T & _readys_readys_T_1; // @[Arbiter.scala:26:{29,39,48}] wire [7:0] readys_readys = ~_readys_readys_T_2; // @[Arbiter.scala:26:{18,39}] wire [7:0] _readys_T_7 = readys_readys; // @[Arbiter.scala:26:18, :30:11] wire _readys_T_5 = |readys_valid; // @[Arbiter.scala:21:23, :27:27] wire _readys_T_6 = latch & _readys_T_5; // @[Arbiter.scala:27:{18,27}, 
:62:24] wire [7:0] _readys_mask_T = readys_readys & readys_valid; // @[Arbiter.scala:21:23, :26:18, :28:29] wire [8:0] _readys_mask_T_1 = {_readys_mask_T, 1'h0}; // @[package.scala:253:48] wire [7:0] _readys_mask_T_2 = _readys_mask_T_1[7:0]; // @[package.scala:253:{48,53}] wire [7:0] _readys_mask_T_3 = _readys_mask_T | _readys_mask_T_2; // @[package.scala:253:{43,53}] wire [9:0] _readys_mask_T_4 = {_readys_mask_T_3, 2'h0}; // @[package.scala:253:{43,48}] wire [7:0] _readys_mask_T_5 = _readys_mask_T_4[7:0]; // @[package.scala:253:{48,53}] wire [7:0] _readys_mask_T_6 = _readys_mask_T_3 | _readys_mask_T_5; // @[package.scala:253:{43,53}] wire [11:0] _readys_mask_T_7 = {_readys_mask_T_6, 4'h0}; // @[package.scala:253:{43,48}] wire [7:0] _readys_mask_T_8 = _readys_mask_T_7[7:0]; // @[package.scala:253:{48,53}] wire [7:0] _readys_mask_T_9 = _readys_mask_T_6 | _readys_mask_T_8; // @[package.scala:253:{43,53}] wire [7:0] _readys_mask_T_10 = _readys_mask_T_9; // @[package.scala:253:43, :254:17] wire _readys_T_8 = _readys_T_7[0]; // @[Arbiter.scala:30:11, :68:76] wire readys_0 = _readys_T_8; // @[Arbiter.scala:68:{27,76}] wire _readys_T_9 = _readys_T_7[1]; // @[Arbiter.scala:30:11, :68:76] wire readys_1 = _readys_T_9; // @[Arbiter.scala:68:{27,76}] wire _readys_T_10 = _readys_T_7[2]; // @[Arbiter.scala:30:11, :68:76] wire readys_2 = _readys_T_10; // @[Arbiter.scala:68:{27,76}] wire _readys_T_11 = _readys_T_7[3]; // @[Arbiter.scala:30:11, :68:76] wire readys_3 = _readys_T_11; // @[Arbiter.scala:68:{27,76}] wire _readys_T_12 = _readys_T_7[4]; // @[Arbiter.scala:30:11, :68:76] wire readys_4 = _readys_T_12; // @[Arbiter.scala:68:{27,76}] wire _readys_T_13 = _readys_T_7[5]; // @[Arbiter.scala:30:11, :68:76] wire readys_5 = _readys_T_13; // @[Arbiter.scala:68:{27,76}] wire _readys_T_14 = _readys_T_7[6]; // @[Arbiter.scala:30:11, :68:76] wire readys_6 = _readys_T_14; // @[Arbiter.scala:68:{27,76}] wire _readys_T_15 = _readys_T_7[7]; // @[Arbiter.scala:30:11, :68:76] wire readys_7 = _readys_T_15; // @[Arbiter.scala:68:{27,76}] wire _winner_T = readys_0 & portsDIO_filtered_0_valid; // @[Xbar.scala:352:24] wire winner_0 = _winner_T; // @[Arbiter.scala:71:{27,69}] wire _winner_T_1 = readys_1 & portsDIO_filtered_1_0_valid; // @[Xbar.scala:352:24] wire winner_1 = _winner_T_1; // @[Arbiter.scala:71:{27,69}] wire _winner_T_2 = readys_2 & portsDIO_filtered_2_0_valid; // @[Xbar.scala:352:24] wire winner_2 = _winner_T_2; // @[Arbiter.scala:71:{27,69}] wire _winner_T_3 = readys_3 & portsDIO_filtered_3_0_valid; // @[Xbar.scala:352:24] wire winner_3 = _winner_T_3; // @[Arbiter.scala:71:{27,69}] wire _winner_T_4 = readys_4 & portsDIO_filtered_4_0_valid; // @[Xbar.scala:352:24] wire winner_4 = _winner_T_4; // @[Arbiter.scala:71:{27,69}] wire _winner_T_5 = readys_5 & portsDIO_filtered_5_0_valid; // @[Xbar.scala:352:24] wire winner_5 = _winner_T_5; // @[Arbiter.scala:71:{27,69}] wire _winner_T_6 = readys_6 & portsDIO_filtered_6_0_valid; // @[Xbar.scala:352:24] wire winner_6 = _winner_T_6; // @[Arbiter.scala:71:{27,69}] wire _winner_T_7 = readys_7 & portsDIO_filtered_7_0_valid; // @[Xbar.scala:352:24] wire winner_7 = _winner_T_7; // @[Arbiter.scala:71:{27,69}] wire prefixOR_1 = winner_0; // @[Arbiter.scala:71:27, :76:48] wire prefixOR_2 = prefixOR_1 | winner_1; // @[Arbiter.scala:71:27, :76:48] wire prefixOR_3 = prefixOR_2 | winner_2; // @[Arbiter.scala:71:27, :76:48] wire prefixOR_4 = prefixOR_3 | winner_3; // @[Arbiter.scala:71:27, :76:48] wire prefixOR_5 = prefixOR_4 | winner_4; // @[Arbiter.scala:71:27, :76:48] wire 
prefixOR_6 = prefixOR_5 | winner_5; // @[Arbiter.scala:71:27, :76:48] wire prefixOR_7 = prefixOR_6 | winner_6; // @[Arbiter.scala:71:27, :76:48] wire _prefixOR_T = prefixOR_7 | winner_7; // @[Arbiter.scala:71:27, :76:48] wire _in_0_d_valid_T = portsDIO_filtered_0_valid | portsDIO_filtered_1_0_valid; // @[Xbar.scala:352:24]
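The readys_* / readys_mask / winner_* nets above are a flattened rendering of the rotating-priority (round-robin) arbiter that decides which of the eight filtered D-channel sources may drive in_0_d: beatsLeft counts down the beats of the current burst, latch (idle && in_0_d_ready) advances the rotation, and readys_mask remembers the last winner. The self-contained Chisel sketch below reconstructs that priority computation from the netlist; the module name RoundRobinReadys and its helper functions are illustrative only, not the rocket-chip Arbiter.scala source itself.

import chisel3._
import chisel3.util._

// Sketch of the ready computation behind the readys_*/readys_mask nets above.
class RoundRobinReadys(n: Int) extends Module {
  val io = IO(new Bundle {
    val valid  = Input(UInt(n.W))  // per-source request valids (portsDIO_filtered_*_valid)
    val latch  = Input(Bool())     // advance the rotation: idle && sink ready
    val readys = Output(UInt(n.W)) // sources eligible to win this cycle
  })

  // OR-scans: leftOR propagates set bits toward the MSB, rightOR toward the LSB.
  // rightOR is capped at a fill distance of n-1, matching the >>1 / >>2 / >>4 chain above.
  def leftOR(x: UInt): UInt  = (0 until n).map(i => (x << i)(n - 1, 0)).reduce(_ | _)
  def rightOR(x: UInt): UInt = (0 until n).map(i => x >> i).reduce(_ | _)

  // After a grant, mask covers the winner and every higher index, so ~mask is
  // the set of sources favoured on the next rotation (all-ones = no favourite yet).
  val mask = RegInit(((BigInt(1) << n) - 1).U(n.W))

  // Two copies of the valids: the upper copy keeps only the favoured sources,
  // turning the wrap-around priority search into a single linear scan.
  val filter  = Cat(io.valid & ~mask, io.valid)
  val unready = (rightOR(filter) >> 1) | (mask << n)
  val readys  = ~(unready(2 * n - 1, n) & unready(n - 1, 0))

  when (io.latch && io.valid.orR) {
    mask := leftOR(readys & io.valid) // mask the winner and everything above it
  }
  io.readys := readys
}

The winner_* terms are then just readys & valid, which this scheme keeps one-hot (in the Chisel source the prefixOR_* chain backs a "never two winners" assertion), so the subsequent mux of the chosen source onto in_0_d is conflict-free.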
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
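Taken together, the two files above give the pattern used for CDC synchronization: a behavioural shift register wrapped in a distinctly named module so a backend flow can replace or constrain it with metastability-hardened cells. A minimal usage sketch follows; the module name IrqSync and its port names are hypothetical, and only the AsyncResetSynchronizerShiftReg.apply call reflects the API defined above.

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Hypothetical consumer: synchronize an asynchronous interrupt into the local domain.
class IrqSync extends Module {
  val io = IO(new Bundle {
    val irq_async = Input(Bool())   // toggles in some other clock domain
    val irq_sync  = Output(Bool())  // safe to sample in this module's clock domain
  })
  // Three flops deep, reset value 0: elaborates to an
  // AsyncResetSynchronizerShiftReg_w1_d3_i0 module like the one emitted below.
  io.irq_sync := AsyncResetSynchronizerShiftReg(io.irq_async, sync = 3, init = 0, name = Some("irq_sync"))
}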
module AsyncResetSynchronizerShiftReg_w1_d3_i0_118( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_206 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
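For reference, the module above can also be elaborated on its own. The driver below is a sketch that assumes a CIRCT-based Chisel toolchain (circt.stage.ChiselStage), which may differ from the build flow actually used here; the desiredName override in SynchronizerReg.scala yields the AsyncResetSynchronizerShiftReg_w1_d3_i0 base name, and the trailing _118 appears to be a uniquifying suffix added because the full design elaborates many copies of this wrapper.

import circt.stage.ChiselStage
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Stand-alone elaboration sketch (the stage API is an assumption, see note above).
object EmitSyncShiftReg extends App {
  println(ChiselStage.emitSystemVerilog(new AsyncResetSynchronizerShiftReg(w = 1, sync = 3, init = 0)))
}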
Generate the Verilog code corresponding to the following Chisel files. File Nodes.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends 
FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): 
TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( 
dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File Parameters.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property.cover import scala.math.{min,max} case class CacheParameters( level: Int, ways: Int, sets: Int, blockBytes: Int, beatBytes: Int, // inner hintsSkipProbe: Boolean) { require (ways > 0) require (sets > 0) require (blockBytes > 0 && isPow2(blockBytes)) require (beatBytes > 0 && isPow2(beatBytes)) require (blockBytes >= beatBytes) val blocks = ways * sets val sizeBytes = blocks * blockBytes val blockBeats = blockBytes/beatBytes } case class InclusiveCachePortParameters( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams) { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e)) } object InclusiveCachePortParameters { val none = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.none) val full = InclusiveCachePortParameters( a = BufferParams.default, b = BufferParams.default, c = BufferParams.default, d = BufferParams.default, e = BufferParams.default) // This removes feed-through paths from C=>A and A=>C val fullC = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.default, d = BufferParams.none, e = BufferParams.none) val flowAD = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.flow, e = BufferParams.none) val flowAE = InclusiveCachePortParameters( a = BufferParams.flow, b = 
BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.flow) // For innerBuf: // SinkA: no restrictions, flows into scheduler+putbuffer // SourceB: no restrictions, flows out of scheduler // sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore // SourceD: no restrictions, flows out of bankedStore/regout // SinkE: no restrictions, flows into scheduler // // ... so while none is possible, you probably want at least flowAC to cut ready // from the scheduler delay and flowD to ease SourceD back-pressure // For outerBufer: // SourceA: must not be pipe, flows out of scheduler // SinkB: no restrictions, flows into scheduler // SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored // SinkD: no restrictions, flows into scheduler & bankedStore // SourceE: must not be pipe, flows out of scheduler // // ... AE take the channel ready into the scheduler, so you need at least flowAE } case class InclusiveCacheMicroParameters( writeBytes: Int, // backing store update granularity memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz) portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes dirReg: Boolean = false, innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE { require (writeBytes > 0 && isPow2(writeBytes)) require (memCycles > 0) require (portFactor >= 2) // for inner RMW and concurrent outer Relase + Grant } case class InclusiveCacheControlParameters( address: BigInt, beatBytes: Int, bankedControl: Boolean) case class InclusiveCacheParameters( cache: CacheParameters, micro: InclusiveCacheMicroParameters, control: Boolean, inner: TLEdgeIn, outer: TLEdgeOut)(implicit val p: Parameters) { require (cache.ways > 1) require (cache.sets > 1 && isPow2(cache.sets)) require (micro.writeBytes <= inner.manager.beatBytes) require (micro.writeBytes <= outer.manager.beatBytes) require (inner.manager.beatBytes <= cache.blockBytes) require (outer.manager.beatBytes <= cache.blockBytes) // Require that all cached address ranges have contiguous blocks outer.manager.managers.flatMap(_.address).foreach { a => require (a.alignment >= cache.blockBytes) } // If we are the first level cache, we do not need to support inner-BCE val firstLevel = !inner.client.clients.exists(_.supports.probe) // If we are the last level cache, we do not need to support outer-B val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED) require (lastLevel) // Provision enough resources to achieve full throughput with missing single-beat accesses val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro) val secondary = max(mshrs, micro.memCycles - mshrs) val putLists = micro.memCycles // allow every request to be single beat val putBeats = max(2*cache.blockBeats, micro.memCycles) val relLists = 2 val relBeats = relLists*cache.blockBeats val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address)) val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_)) def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] = if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail) val addressMapping = bitOffsets(pickMask) val addressBits = addressMapping.size // println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}") val 
allClients = inner.client.clients.size val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size val clientBits = max(1, clientBitsRaw) val stateBits = 2 val wayBits = log2Ceil(cache.ways) val setBits = log2Ceil(cache.sets) val offsetBits = log2Ceil(cache.blockBytes) val tagBits = addressBits - setBits - offsetBits val putBits = log2Ceil(max(putLists, relLists)) require (tagBits > 0) require (offsetBits > 0) val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1 val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1 val innerMaskBits = inner.manager.beatBytes / micro.writeBytes val outerMaskBits = outer.manager.beatBytes / micro.writeBytes def clientBit(source: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse) } } def clientSource(bit: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U)) } } def parseAddress(x: UInt): (UInt, UInt, UInt) = { val offset = Cat(addressMapping.map(o => x(o,o)).reverse) val set = offset >> offsetBits val tag = set >> setBits (tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0)) } def widen(x: UInt, width: Int): UInt = { val y = x | 0.U(width.W) assert (y >> width === 0.U) y(width-1, 0) } def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = { val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits)) val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) } addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) } Cat(bits.reverse) } def restoreAddress(expanded: UInt): UInt = { val missingBits = flatAddresses .map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match .groupBy(_._1) .view .mapValues(_.map(_._2)) val muxMask = AddressDecoder(missingBits.values.toList) val mux = missingBits.toList.map { case (bits, addrs) => val widen = addrs.map(_.widen(~muxMask)) val matches = AddressSet .unify(widen.distinct) .map(_.contains(expanded)) .reduce(_ || _) (matches, bits.U) } expanded | Mux1H(mux) } def dirReg[T <: Data](x: T, en: Bool = true.B): T = { if (micro.dirReg) RegEnable(x, en) else x } def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc) } object MetaData { val stateBits = 2 def INVALID: UInt = 0.U(stateBits.W) // way is empty def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch // Does a request need trunk? def needT(opcode: UInt, param: UInt): Bool = { !opcode(2) || (opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) || ((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB) } // Does a request prove the client need not be probed? 
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = { // Acquire(toB) and Get => is N, so no probe // Acquire(*toT) => is N or B, but need T, so no probe // Hint => could be anything, so probe IS needed, if hintsSkipProbe is enabled, skip probe the same client // Put* => is N or B, so probe IS needed opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B) } def isToN(param: UInt): Bool = { param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN } def isToB(param: UInt): Bool = { param === TLPermissions.TtoB || param === TLPermissions.BtoB } } object InclusiveCacheParameters { val lfsrBits = 10 val L2ControlAddress = 0x2010000 val L2ControlSize = 0x1000 def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = { // We need 2-3 normal MSHRs to cover the Directory latency // To fully exploit memory bandwidth-delay-product, we need memCyles/blockBeats MSHRs max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats) } def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = // We need a dedicated MSHR for B+C each 2 + out_mshrs(cache, micro) } class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. 
*/ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. 
* * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. 
*/ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. 
Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
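      // Illustrative, hypothetical example of the counting below: if the only bindings involving N are
      //   foo := N, bar :=* N, and N :*= baz
      // then oKnown = 1 (foo := N), oStars = 1 (bar :=* N), iKnown = 0, and iStars = 1 (N :*= baz);
      // resolveStar then decides how many edges each star expands to.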
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
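        // For illustration (hypothetical values): if n.oPortMapping(i) == (2, 5), this binding
        // expands to the three edges indexed 2, 3 and 4 on n.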
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File InclusiveCache.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.subsystem.{SubsystemBankedCoherenceKey} import freechips.rocketchip.regmapper._ import freechips.rocketchip.tilelink._ class InclusiveCache( val cache: CacheParameters, val micro: InclusiveCacheMicroParameters, control: Option[InclusiveCacheControlParameters] = None )(implicit p: Parameters) extends LazyModule { val access = TransferSizes(1, cache.blockBytes) val xfer = TransferSizes(cache.blockBytes, cache.blockBytes) val atom = TransferSizes(1, cache.beatBytes) var resourcesOpt: Option[ResourceBindings] = None val device: SimpleDevice = new SimpleDevice("cache-controller", Seq("sifive,inclusivecache0", "cache")) { def ofInt(x: Int) = Seq(ResourceInt(BigInt(x))) override def describe(resources: ResourceBindings): Description = { resourcesOpt = Some(resources) val Description(name, mapping) = super.describe(resources) // Find the outer caches val outer = node.edges.out .flatMap(_.manager.managers) .filter(_.supportsAcquireB) .flatMap(_.resources.headOption) .map(_.owner.label) .distinct val nextlevel: Option[(String, Seq[ResourceValue])] = if (outer.isEmpty) { None } else { Some("next-level-cache" -> outer.map(l => ResourceReference(l)).toList) } val extra = Map( "cache-level" -> ofInt(2), "cache-unified" -> Nil, "cache-size" -> ofInt(cache.sizeBytes * node.edges.in.size), "cache-sets" -> ofInt(cache.sets * node.edges.in.size), "cache-block-size" -> ofInt(cache.blockBytes), "sifive,mshr-count" -> ofInt(InclusiveCacheParameters.all_mshrs(cache, micro))) Description(name, mapping ++ extra ++ nextlevel) } } val node: TLAdapterNode = TLAdapterNode( clientFn = { _ => TLClientPortParameters(Seq(TLClientParameters( name = s"L${cache.level} InclusiveCache", sourceId = IdRange(0, InclusiveCacheParameters.out_mshrs(cache, micro)), supportsProbe = xfer))) }, managerFn = { m => TLManagerPortParameters( managers = m.managers.map { m => m.copy( regionType = if (m.regionType >= RegionType.UNCACHED) RegionType.CACHED else m.regionType, resources = Resource(device, "caches") +: m.resources, supportsAcquireB = xfer, supportsAcquireT = if (m.supportsAcquireT) xfer else TransferSizes.none, supportsArithmetic = if (m.supportsAcquireT) atom else TransferSizes.none, supportsLogical = if (m.supportsAcquireT) atom else TransferSizes.none, supportsGet = access, supportsPutFull = if (m.supportsAcquireT) access else TransferSizes.none, supportsPutPartial = if (m.supportsAcquireT) access else TransferSizes.none, supportsHint = access, alwaysGrantsT = false, fifoId = None) }, beatBytes = cache.beatBytes, endSinkId = InclusiveCacheParameters.all_mshrs(cache, micro), minLatency = 2) }) val ctrls = control.map { c => val nCtrls = if (c.bankedControl) p(SubsystemBankedCoherenceKey).nBanks else 1 Seq.tabulate(nCtrls) { i => LazyModule(new InclusiveCacheControl(this, c.copy(address = c.address + i * InclusiveCacheParameters.L2ControlSize))) } }.getOrElse(Nil) lazy val module = new Impl class Impl extends LazyModuleImp(this) { // If you have a control port, you must have at least one cache port require (ctrls.isEmpty || !node.edges.in.isEmpty) // Extract the client IdRanges; must be the same on all ports! 
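    // For illustration (hypothetical values): with two clients the sorted ranges might be
    // Seq(IdRange(0, 64), IdRange(64, 72)); every cache port must observe this exact same sequence.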
val clientIds = node.edges.in.headOption.map(_.client.clients.map(_.sourceId).sortBy(_.start)) node.edges.in.foreach { e => require(e.client.clients.map(_.sourceId).sortBy(_.start) == clientIds.get) } // Use the natural ordering of clients (just like in Directory) node.edges.in.headOption.foreach { n => println(s"L${cache.level} InclusiveCache Client Map:") n.client.clients.zipWithIndex.foreach { case (c,i) => println(s"\t${i} <= ${c.name}") } println("") } // Create the L2 Banks val mods = (node.in zip node.out) map { case ((in, edgeIn), (out, edgeOut)) => edgeOut.manager.managers.foreach { m => require (m.supportsAcquireB.contains(xfer), s"All managers behind the L2 must support acquireB($xfer) " + s"but ${m.name} only supports (${m.supportsAcquireB})!") if (m.supportsAcquireT) require (m.supportsAcquireT.contains(xfer), s"Any probing managers behind the L2 must support acquireT($xfer) " + s"but ${m.name} only supports (${m.supportsAcquireT})!") } val params = InclusiveCacheParameters(cache, micro, !ctrls.isEmpty, edgeIn, edgeOut) val scheduler = Module(new InclusiveCacheBankScheduler(params)).suggestName("inclusive_cache_bank_sched") scheduler.io.in <> in out <> scheduler.io.out scheduler.io.ways := DontCare scheduler.io.divs := DontCare // Tie down default values in case there is no controller scheduler.io.req.valid := false.B scheduler.io.req.bits.address := 0.U scheduler.io.resp.ready := true.B // Fix-up the missing addresses. We do this here so that the Scheduler can be // deduplicated by Firrtl to make hierarchical place-and-route easier. out.a.bits.address := params.restoreAddress(scheduler.io.out.a.bits.address) in .b.bits.address := params.restoreAddress(scheduler.io.in .b.bits.address) out.c.bits.address := params.restoreAddress(scheduler.io.out.c.bits.address) scheduler } ctrls.foreach { ctrl => ctrl.module.io.flush_req.ready := false.B ctrl.module.io.flush_resp := false.B ctrl.module.io.flush_match := false.B } mods.zip(node.edges.in).zipWithIndex.foreach { case ((sched, edgeIn), i) => val ctrl = if (ctrls.size > 1) Some(ctrls(i)) else ctrls.headOption ctrl.foreach { ctrl => { val contained = edgeIn.manager.managers.flatMap(_.address) .map(_.contains(ctrl.module.io.flush_req.bits)).reduce(_||_) when (contained) { ctrl.module.io.flush_match := true.B } sched.io.req.valid := contained && ctrl.module.io.flush_req.valid sched.io.req.bits.address := ctrl.module.io.flush_req.bits when (contained && sched.io.req.ready) { ctrl.module.io.flush_req.ready := true.B } when (sched.io.resp.valid) { ctrl.module.io.flush_resp := true.B } sched.io.resp.ready := true.B }} } def json = s"""{"banks":[${mods.map(_.json).mkString(",")}]}""" } }
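As a quick cross-check of the parameterization above, the sketch below (standalone, not part of the sources; the blockBeats, memCycles and dirReg values are hypothetical examples) reproduces the MSHR-count arithmetic from InclusiveCacheParameters that sizes sourceId and endSinkId in the node definition:

object MshrCountSketch extends App {
  // Hypothetical example values; the real ones come from CacheParameters and
  // InclusiveCacheMicroParameters of the instantiated design.
  val blockBeats = 8    // cache.blockBytes / cache.beatBytes
  val memCycles  = 40   // micro.memCycles
  val dirReg     = false

  // Same arithmetic as InclusiveCacheParameters.out_mshrs / all_mshrs: 2-3 MSHRs to cover the
  // Directory latency, enough MSHRs to cover the memory bandwidth-delay product, and one
  // dedicated MSHR each for the B and C channels on top of that.
  val outMshrs = math.max(if (dirReg) 3 else 2, (memCycles + blockBeats - 1) / blockBeats)
  val allMshrs = 2 + outMshrs

  println(s"out_mshrs = $outMshrs, all_mshrs = $allMshrs") // with these values: out_mshrs = 5, all_mshrs = 7
}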
module InclusiveCache( // @[InclusiveCache.scala:108:9] input clock, // @[InclusiveCache.scala:108:9] input reset, // @[InclusiveCache.scala:108:9] output auto_ctrls_ctrl_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_ctrls_ctrl_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_ctrls_ctrl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_ctrls_ctrl_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [1:0] auto_ctrls_ctrl_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [12:0] auto_ctrls_ctrl_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [25:0] auto_ctrls_ctrl_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_ctrls_ctrl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_ctrls_ctrl_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_ctrls_ctrl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_ctrls_ctrl_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_ctrls_ctrl_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_ctrls_ctrl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_ctrls_ctrl_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [12:0] auto_ctrls_ctrl_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output [63:0] auto_ctrls_ctrl_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [255:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_in_b_ready, // @[LazyModuleImp.scala:107:25] output auto_in_b_valid, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_b_bits_param, // @[LazyModuleImp.scala:107:25] output [31:0] auto_in_b_bits_address, // @[LazyModuleImp.scala:107:25] output auto_in_c_ready, // @[LazyModuleImp.scala:107:25] input auto_in_c_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_c_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_c_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_c_bits_size, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_c_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_c_bits_address, // @[LazyModuleImp.scala:107:25] input [255:0] auto_in_c_bits_data, // @[LazyModuleImp.scala:107:25] input auto_in_c_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [7:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output [4:0] auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [255:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_in_e_valid, // @[LazyModuleImp.scala:107:25] 
input [4:0] auto_in_e_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [4:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_out_c_ready, // @[LazyModuleImp.scala:107:25] output auto_out_c_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_c_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_c_bits_size, // @[LazyModuleImp.scala:107:25] output [4:0] auto_out_c_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_c_bits_address, // @[LazyModuleImp.scala:107:25] output [63:0] auto_out_c_bits_data, // @[LazyModuleImp.scala:107:25] output auto_out_c_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [4:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_out_e_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_e_bits_sink // @[LazyModuleImp.scala:107:25] ); wire [31:0] _inclusive_cache_bank_sched_io_in_b_bits_address; // @[InclusiveCache.scala:137:29] wire [31:0] _inclusive_cache_bank_sched_io_out_a_bits_address; // @[InclusiveCache.scala:137:29] wire [31:0] _inclusive_cache_bank_sched_io_out_c_bits_address; // @[InclusiveCache.scala:137:29] wire _inclusive_cache_bank_sched_io_req_ready; // @[InclusiveCache.scala:137:29] wire _inclusive_cache_bank_sched_io_resp_valid; // @[InclusiveCache.scala:137:29] wire _ctrls_io_flush_req_valid; // @[InclusiveCache.scala:103:43] wire [63:0] _ctrls_io_flush_req_bits; // @[InclusiveCache.scala:103:43] wire auto_ctrls_ctrl_in_a_valid_0 = auto_ctrls_ctrl_in_a_valid; // @[InclusiveCache.scala:108:9] wire [2:0] auto_ctrls_ctrl_in_a_bits_opcode_0 = auto_ctrls_ctrl_in_a_bits_opcode; // @[InclusiveCache.scala:108:9] wire [2:0] auto_ctrls_ctrl_in_a_bits_param_0 = auto_ctrls_ctrl_in_a_bits_param; // @[InclusiveCache.scala:108:9] wire [1:0] auto_ctrls_ctrl_in_a_bits_size_0 = auto_ctrls_ctrl_in_a_bits_size; // @[InclusiveCache.scala:108:9] wire [12:0] auto_ctrls_ctrl_in_a_bits_source_0 = auto_ctrls_ctrl_in_a_bits_source; // @[InclusiveCache.scala:108:9] wire [25:0] auto_ctrls_ctrl_in_a_bits_address_0 = auto_ctrls_ctrl_in_a_bits_address; // @[InclusiveCache.scala:108:9] wire [7:0] auto_ctrls_ctrl_in_a_bits_mask_0 = auto_ctrls_ctrl_in_a_bits_mask; // @[InclusiveCache.scala:108:9] wire [63:0] auto_ctrls_ctrl_in_a_bits_data_0 = 
auto_ctrls_ctrl_in_a_bits_data; // @[InclusiveCache.scala:108:9] wire auto_ctrls_ctrl_in_a_bits_corrupt_0 = auto_ctrls_ctrl_in_a_bits_corrupt; // @[InclusiveCache.scala:108:9] wire auto_ctrls_ctrl_in_d_ready_0 = auto_ctrls_ctrl_in_d_ready; // @[InclusiveCache.scala:108:9] wire auto_in_a_valid_0 = auto_in_a_valid; // @[InclusiveCache.scala:108:9] wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[InclusiveCache.scala:108:9] wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[InclusiveCache.scala:108:9] wire [2:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[InclusiveCache.scala:108:9] wire [7:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[InclusiveCache.scala:108:9] wire [31:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[InclusiveCache.scala:108:9] wire [31:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[InclusiveCache.scala:108:9] wire [255:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[InclusiveCache.scala:108:9] wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[InclusiveCache.scala:108:9] wire auto_in_b_ready_0 = auto_in_b_ready; // @[InclusiveCache.scala:108:9] wire auto_in_c_valid_0 = auto_in_c_valid; // @[InclusiveCache.scala:108:9] wire [2:0] auto_in_c_bits_opcode_0 = auto_in_c_bits_opcode; // @[InclusiveCache.scala:108:9] wire [2:0] auto_in_c_bits_param_0 = auto_in_c_bits_param; // @[InclusiveCache.scala:108:9] wire [2:0] auto_in_c_bits_size_0 = auto_in_c_bits_size; // @[InclusiveCache.scala:108:9] wire [7:0] auto_in_c_bits_source_0 = auto_in_c_bits_source; // @[InclusiveCache.scala:108:9] wire [31:0] auto_in_c_bits_address_0 = auto_in_c_bits_address; // @[InclusiveCache.scala:108:9] wire [255:0] auto_in_c_bits_data_0 = auto_in_c_bits_data; // @[InclusiveCache.scala:108:9] wire auto_in_c_bits_corrupt_0 = auto_in_c_bits_corrupt; // @[InclusiveCache.scala:108:9] wire auto_in_d_ready_0 = auto_in_d_ready; // @[InclusiveCache.scala:108:9] wire auto_in_e_valid_0 = auto_in_e_valid; // @[InclusiveCache.scala:108:9] wire [4:0] auto_in_e_bits_sink_0 = auto_in_e_bits_sink; // @[InclusiveCache.scala:108:9] wire auto_out_a_ready_0 = auto_out_a_ready; // @[InclusiveCache.scala:108:9] wire auto_out_c_ready_0 = auto_out_c_ready; // @[InclusiveCache.scala:108:9] wire auto_out_d_valid_0 = auto_out_d_valid; // @[InclusiveCache.scala:108:9] wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[InclusiveCache.scala:108:9] wire [1:0] auto_out_d_bits_param_0 = auto_out_d_bits_param; // @[InclusiveCache.scala:108:9] wire [2:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[InclusiveCache.scala:108:9] wire [4:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[InclusiveCache.scala:108:9] wire [2:0] auto_out_d_bits_sink_0 = auto_out_d_bits_sink; // @[InclusiveCache.scala:108:9] wire auto_out_d_bits_denied_0 = auto_out_d_bits_denied; // @[InclusiveCache.scala:108:9] wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[InclusiveCache.scala:108:9] wire auto_out_d_bits_corrupt_0 = auto_out_d_bits_corrupt; // @[InclusiveCache.scala:108:9] wire [32:0] _nodeOut_a_bits_address_mux_matches_T_2 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _nodeOut_a_bits_address_mux_matches_T_3 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _nodeIn_b_bits_address_mux_matches_T_2 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _nodeIn_b_bits_address_mux_matches_T_3 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] _nodeOut_c_bits_address_mux_matches_T_2 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] 
_nodeOut_c_bits_address_mux_matches_T_3 = 33'h0; // @[Parameters.scala:137:46] wire [63:0] auto_out_b_bits_data = 64'h0; // @[InclusiveCache.scala:108:9, :137:29] wire [63:0] nodeOut_b_bits_data = 64'h0; // @[InclusiveCache.scala:108:9, :137:29] wire [7:0] auto_out_b_bits_mask = 8'h0; // @[InclusiveCache.scala:108:9, :137:29] wire [7:0] nodeOut_b_bits_mask = 8'h0; // @[InclusiveCache.scala:108:9, :137:29] wire [31:0] auto_out_b_bits_address = 32'h0; // @[InclusiveCache.scala:108:9, :137:29] wire [31:0] nodeOut_b_bits_address = 32'h0; // @[InclusiveCache.scala:108:9, :137:29] wire [4:0] auto_out_b_bits_source = 5'h0; // @[InclusiveCache.scala:108:9, :137:29] wire [4:0] nodeOut_b_bits_source = 5'h0; // @[InclusiveCache.scala:108:9, :137:29] wire [2:0] auto_out_b_bits_opcode = 3'h0; // @[InclusiveCache.scala:108:9, :137:29] wire [2:0] auto_out_b_bits_size = 3'h0; // @[InclusiveCache.scala:108:9, :137:29] wire [2:0] nodeOut_b_bits_opcode = 3'h0; // @[InclusiveCache.scala:108:9, :137:29] wire [2:0] nodeOut_b_bits_size = 3'h0; // @[InclusiveCache.scala:108:9, :137:29] wire auto_in_e_ready = 1'h1; // @[Nodes.scala:27:25] wire auto_out_b_ready = 1'h1; // @[Nodes.scala:27:25] wire auto_out_e_ready = 1'h1; // @[Nodes.scala:27:25] wire nodeIn_e_ready = 1'h1; // @[Nodes.scala:27:25] wire nodeOut_b_ready = 1'h1; // @[Nodes.scala:27:25] wire nodeOut_e_ready = 1'h1; // @[Nodes.scala:27:25] wire nodeOut_a_bits_address_mux_0_1 = 1'h1; // @[Nodes.scala:27:25] wire nodeIn_b_bits_address_mux_0_1 = 1'h1; // @[Nodes.scala:27:25] wire nodeOut_c_bits_address_mux_0_1 = 1'h1; // @[Nodes.scala:27:25] wire [255:0] auto_in_b_bits_data = 256'h0; // @[Nodes.scala:27:25] wire [255:0] nodeIn_b_bits_data = 256'h0; // @[Nodes.scala:27:25] wire [31:0] auto_in_b_bits_mask = 32'hFFFFFFFF; // @[Nodes.scala:27:25] wire [31:0] nodeIn_b_bits_mask = 32'hFFFFFFFF; // @[Nodes.scala:27:25] wire [7:0] auto_in_b_bits_source = 8'h40; // @[Nodes.scala:27:25] wire [7:0] nodeIn_b_bits_source = 8'h40; // @[Nodes.scala:27:25] wire [2:0] auto_in_b_bits_opcode = 3'h6; // @[Nodes.scala:27:25] wire [2:0] auto_in_b_bits_size = 3'h6; // @[Nodes.scala:27:25] wire [2:0] nodeIn_b_bits_opcode = 3'h6; // @[Nodes.scala:27:25] wire [2:0] nodeIn_b_bits_size = 3'h6; // @[Nodes.scala:27:25] wire auto_ctrls_ctrl_in_d_bits_sink = 1'h0; // @[Nodes.scala:27:25] wire auto_ctrls_ctrl_in_d_bits_denied = 1'h0; // @[Nodes.scala:27:25] wire auto_ctrls_ctrl_in_d_bits_corrupt = 1'h0; // @[Nodes.scala:27:25] wire auto_in_b_bits_corrupt = 1'h0; // @[Nodes.scala:27:25] wire auto_out_b_valid = 1'h0; // @[Nodes.scala:27:25] wire auto_out_b_bits_corrupt = 1'h0; // @[Nodes.scala:27:25] wire nodeIn_b_bits_corrupt = 1'h0; // @[Nodes.scala:27:25] wire nodeOut_b_valid = 1'h0; // @[Nodes.scala:27:25] wire nodeOut_b_bits_corrupt = 1'h0; // @[Nodes.scala:27:25] wire [1:0] auto_ctrls_ctrl_in_d_bits_param = 2'h0; // @[InclusiveCache.scala:103:43, :108:9, :137:29] wire [1:0] auto_out_b_bits_param = 2'h0; // @[InclusiveCache.scala:103:43, :108:9, :137:29] wire [1:0] nodeOut_b_bits_param = 2'h0; // @[InclusiveCache.scala:103:43, :108:9, :137:29] wire nodeIn_a_ready; // @[MixedNode.scala:551:17] wire nodeIn_a_valid = auto_in_a_valid_0; // @[InclusiveCache.scala:108:9] wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[InclusiveCache.scala:108:9] wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[InclusiveCache.scala:108:9] wire [2:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[InclusiveCache.scala:108:9] wire [7:0] nodeIn_a_bits_source = 
auto_in_a_bits_source_0; // @[InclusiveCache.scala:108:9] wire [31:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[InclusiveCache.scala:108:9] wire [31:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[InclusiveCache.scala:108:9] wire [255:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[InclusiveCache.scala:108:9] wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[InclusiveCache.scala:108:9] wire nodeIn_b_ready = auto_in_b_ready_0; // @[InclusiveCache.scala:108:9] wire nodeIn_b_valid; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_b_bits_param; // @[MixedNode.scala:551:17] wire [31:0] nodeIn_b_bits_address; // @[MixedNode.scala:551:17] wire nodeIn_c_ready; // @[MixedNode.scala:551:17] wire nodeIn_c_valid = auto_in_c_valid_0; // @[InclusiveCache.scala:108:9] wire [2:0] nodeIn_c_bits_opcode = auto_in_c_bits_opcode_0; // @[InclusiveCache.scala:108:9] wire [2:0] nodeIn_c_bits_param = auto_in_c_bits_param_0; // @[InclusiveCache.scala:108:9] wire [2:0] nodeIn_c_bits_size = auto_in_c_bits_size_0; // @[InclusiveCache.scala:108:9] wire [7:0] nodeIn_c_bits_source = auto_in_c_bits_source_0; // @[InclusiveCache.scala:108:9] wire [31:0] nodeIn_c_bits_address = auto_in_c_bits_address_0; // @[InclusiveCache.scala:108:9] wire [255:0] nodeIn_c_bits_data = auto_in_c_bits_data_0; // @[InclusiveCache.scala:108:9] wire nodeIn_c_bits_corrupt = auto_in_c_bits_corrupt_0; // @[InclusiveCache.scala:108:9] wire nodeIn_d_ready = auto_in_d_ready_0; // @[InclusiveCache.scala:108:9] wire nodeIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17] wire [2:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17] wire [7:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17] wire [4:0] nodeIn_d_bits_sink; // @[MixedNode.scala:551:17] wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [255:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17] wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire nodeIn_e_valid = auto_in_e_valid_0; // @[InclusiveCache.scala:108:9] wire [4:0] nodeIn_e_bits_sink = auto_in_e_bits_sink_0; // @[InclusiveCache.scala:108:9] wire nodeOut_a_ready = auto_out_a_ready_0; // @[InclusiveCache.scala:108:9] wire nodeOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17] wire [4:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17] wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire nodeOut_c_ready = auto_out_c_ready_0; // @[InclusiveCache.scala:108:9] wire nodeOut_c_valid; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_c_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_c_bits_param; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_c_bits_size; // @[MixedNode.scala:542:17] wire [4:0] nodeOut_c_bits_source; // @[MixedNode.scala:542:17] wire [31:0] nodeOut_c_bits_address; // @[MixedNode.scala:542:17] wire [63:0] nodeOut_c_bits_data; // @[MixedNode.scala:542:17] wire nodeOut_c_bits_corrupt; // @[MixedNode.scala:542:17] wire nodeOut_d_ready; // @[MixedNode.scala:542:17] wire nodeOut_d_valid = auto_out_d_valid_0; // @[InclusiveCache.scala:108:9] wire [2:0] nodeOut_d_bits_opcode = 
auto_out_d_bits_opcode_0; // @[InclusiveCache.scala:108:9] wire [1:0] nodeOut_d_bits_param = auto_out_d_bits_param_0; // @[InclusiveCache.scala:108:9] wire [2:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[InclusiveCache.scala:108:9] wire [4:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[InclusiveCache.scala:108:9] wire [2:0] nodeOut_d_bits_sink = auto_out_d_bits_sink_0; // @[InclusiveCache.scala:108:9] wire nodeOut_d_bits_denied = auto_out_d_bits_denied_0; // @[InclusiveCache.scala:108:9] wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[InclusiveCache.scala:108:9] wire nodeOut_d_bits_corrupt = auto_out_d_bits_corrupt_0; // @[InclusiveCache.scala:108:9] wire nodeOut_e_valid; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_e_bits_sink; // @[MixedNode.scala:542:17] wire auto_ctrls_ctrl_in_a_ready_0; // @[InclusiveCache.scala:108:9] wire [2:0] auto_ctrls_ctrl_in_d_bits_opcode_0; // @[InclusiveCache.scala:108:9] wire [1:0] auto_ctrls_ctrl_in_d_bits_size_0; // @[InclusiveCache.scala:108:9] wire [12:0] auto_ctrls_ctrl_in_d_bits_source_0; // @[InclusiveCache.scala:108:9] wire [63:0] auto_ctrls_ctrl_in_d_bits_data_0; // @[InclusiveCache.scala:108:9] wire auto_ctrls_ctrl_in_d_valid_0; // @[InclusiveCache.scala:108:9] wire auto_in_a_ready_0; // @[InclusiveCache.scala:108:9] wire [1:0] auto_in_b_bits_param_0; // @[InclusiveCache.scala:108:9] wire [31:0] auto_in_b_bits_address_0; // @[InclusiveCache.scala:108:9] wire auto_in_b_valid_0; // @[InclusiveCache.scala:108:9] wire auto_in_c_ready_0; // @[InclusiveCache.scala:108:9] wire [2:0] auto_in_d_bits_opcode_0; // @[InclusiveCache.scala:108:9] wire [1:0] auto_in_d_bits_param_0; // @[InclusiveCache.scala:108:9] wire [2:0] auto_in_d_bits_size_0; // @[InclusiveCache.scala:108:9] wire [7:0] auto_in_d_bits_source_0; // @[InclusiveCache.scala:108:9] wire [4:0] auto_in_d_bits_sink_0; // @[InclusiveCache.scala:108:9] wire auto_in_d_bits_denied_0; // @[InclusiveCache.scala:108:9] wire [255:0] auto_in_d_bits_data_0; // @[InclusiveCache.scala:108:9] wire auto_in_d_bits_corrupt_0; // @[InclusiveCache.scala:108:9] wire auto_in_d_valid_0; // @[InclusiveCache.scala:108:9] wire [2:0] auto_out_a_bits_opcode_0; // @[InclusiveCache.scala:108:9] wire [2:0] auto_out_a_bits_param_0; // @[InclusiveCache.scala:108:9] wire [2:0] auto_out_a_bits_size_0; // @[InclusiveCache.scala:108:9] wire [4:0] auto_out_a_bits_source_0; // @[InclusiveCache.scala:108:9] wire [31:0] auto_out_a_bits_address_0; // @[InclusiveCache.scala:108:9] wire [7:0] auto_out_a_bits_mask_0; // @[InclusiveCache.scala:108:9] wire [63:0] auto_out_a_bits_data_0; // @[InclusiveCache.scala:108:9] wire auto_out_a_bits_corrupt_0; // @[InclusiveCache.scala:108:9] wire auto_out_a_valid_0; // @[InclusiveCache.scala:108:9] wire [2:0] auto_out_c_bits_opcode_0; // @[InclusiveCache.scala:108:9] wire [2:0] auto_out_c_bits_param_0; // @[InclusiveCache.scala:108:9] wire [2:0] auto_out_c_bits_size_0; // @[InclusiveCache.scala:108:9] wire [4:0] auto_out_c_bits_source_0; // @[InclusiveCache.scala:108:9] wire [31:0] auto_out_c_bits_address_0; // @[InclusiveCache.scala:108:9] wire [63:0] auto_out_c_bits_data_0; // @[InclusiveCache.scala:108:9] wire auto_out_c_bits_corrupt_0; // @[InclusiveCache.scala:108:9] wire auto_out_c_valid_0; // @[InclusiveCache.scala:108:9] wire auto_out_d_ready_0; // @[InclusiveCache.scala:108:9] wire [2:0] auto_out_e_bits_sink_0; // @[InclusiveCache.scala:108:9] wire auto_out_e_valid_0; // @[InclusiveCache.scala:108:9] assign auto_in_a_ready_0 = nodeIn_a_ready; // 
@[InclusiveCache.scala:108:9] assign auto_in_b_valid_0 = nodeIn_b_valid; // @[InclusiveCache.scala:108:9] assign auto_in_b_bits_param_0 = nodeIn_b_bits_param; // @[InclusiveCache.scala:108:9] wire [31:0] _nodeIn_b_bits_address_T; // @[Parameters.scala:248:14] assign auto_in_b_bits_address_0 = nodeIn_b_bits_address; // @[InclusiveCache.scala:108:9] assign auto_in_c_ready_0 = nodeIn_c_ready; // @[InclusiveCache.scala:108:9] assign auto_in_d_valid_0 = nodeIn_d_valid; // @[InclusiveCache.scala:108:9] assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[InclusiveCache.scala:108:9] assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[InclusiveCache.scala:108:9] assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[InclusiveCache.scala:108:9] assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[InclusiveCache.scala:108:9] assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[InclusiveCache.scala:108:9] assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[InclusiveCache.scala:108:9] assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[InclusiveCache.scala:108:9] assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[InclusiveCache.scala:108:9] assign auto_out_a_valid_0 = nodeOut_a_valid; // @[InclusiveCache.scala:108:9] assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[InclusiveCache.scala:108:9] assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[InclusiveCache.scala:108:9] assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[InclusiveCache.scala:108:9] assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[InclusiveCache.scala:108:9] wire [31:0] _nodeOut_a_bits_address_T; // @[Parameters.scala:248:14] assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[InclusiveCache.scala:108:9] assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[InclusiveCache.scala:108:9] assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[InclusiveCache.scala:108:9] assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[InclusiveCache.scala:108:9] assign auto_out_c_valid_0 = nodeOut_c_valid; // @[InclusiveCache.scala:108:9] assign auto_out_c_bits_opcode_0 = nodeOut_c_bits_opcode; // @[InclusiveCache.scala:108:9] assign auto_out_c_bits_param_0 = nodeOut_c_bits_param; // @[InclusiveCache.scala:108:9] assign auto_out_c_bits_size_0 = nodeOut_c_bits_size; // @[InclusiveCache.scala:108:9] assign auto_out_c_bits_source_0 = nodeOut_c_bits_source; // @[InclusiveCache.scala:108:9] wire [31:0] _nodeOut_c_bits_address_T; // @[Parameters.scala:248:14] assign auto_out_c_bits_address_0 = nodeOut_c_bits_address; // @[InclusiveCache.scala:108:9] assign auto_out_c_bits_data_0 = nodeOut_c_bits_data; // @[InclusiveCache.scala:108:9] assign auto_out_c_bits_corrupt_0 = nodeOut_c_bits_corrupt; // @[InclusiveCache.scala:108:9] assign auto_out_d_ready_0 = nodeOut_d_ready; // @[InclusiveCache.scala:108:9] assign auto_out_e_valid_0 = nodeOut_e_valid; // @[InclusiveCache.scala:108:9] assign auto_out_e_bits_sink_0 = nodeOut_e_bits_sink; // @[InclusiveCache.scala:108:9] wire [31:0] _nodeOut_a_bits_address_mux_matches_T; // @[Parameters.scala:137:31] wire [32:0] _nodeOut_a_bits_address_mux_matches_T_1 = {1'h0, _nodeOut_a_bits_address_mux_matches_T}; // @[Nodes.scala:27:25] assign nodeOut_a_bits_address = _nodeOut_a_bits_address_T; // @[Parameters.scala:248:14] wire [31:0] _nodeIn_b_bits_address_mux_matches_T; // @[Parameters.scala:137:31] wire [32:0] _nodeIn_b_bits_address_mux_matches_T_1 = {1'h0, _nodeIn_b_bits_address_mux_matches_T}; // 
@[Nodes.scala:27:25] assign nodeIn_b_bits_address = _nodeIn_b_bits_address_T; // @[Parameters.scala:248:14] wire [31:0] _nodeOut_c_bits_address_mux_matches_T; // @[Parameters.scala:137:31] wire [32:0] _nodeOut_c_bits_address_mux_matches_T_1 = {1'h0, _nodeOut_c_bits_address_mux_matches_T}; // @[Nodes.scala:27:25] assign nodeOut_c_bits_address = _nodeOut_c_bits_address_T; // @[Parameters.scala:248:14] wire [63:0] _contained_T = {_ctrls_io_flush_req_bits[63:32], _ctrls_io_flush_req_bits[31:0] ^ 32'h80000000}; // @[Parameters.scala:137:31] wire [64:0] _contained_T_1 = {1'h0, _contained_T}; // @[Nodes.scala:27:25] wire [64:0] _contained_T_2 = _contained_T_1 & 65'h1FFFFFFFFF0000000; // @[Parameters.scala:137:{41,46}] wire [64:0] _contained_T_3 = _contained_T_2; // @[Parameters.scala:137:46] wire _contained_T_4 = _contained_T_3 == 65'h0; // @[Parameters.scala:137:{46,59}] wire [63:0] _contained_T_5 = {_ctrls_io_flush_req_bits[63:28], _ctrls_io_flush_req_bits[27:0] ^ 28'h8000000}; // @[Parameters.scala:137:31] wire [64:0] _contained_T_6 = {1'h0, _contained_T_5}; // @[Nodes.scala:27:25] wire [64:0] _contained_T_7 = _contained_T_6 & 65'h1FFFFFFFFFFFF0000; // @[Parameters.scala:137:{41,46}] wire [64:0] _contained_T_8 = _contained_T_7; // @[Parameters.scala:137:46] wire _contained_T_9 = _contained_T_8 == 65'h0; // @[Parameters.scala:137:{46,59}] wire contained = _contained_T_4 | _contained_T_9; // @[Parameters.scala:137:59] wire _inclusive_cache_bank_sched_io_req_valid_T = contained & _ctrls_io_flush_req_valid; // @[InclusiveCache.scala:103:43, :169:67, :172:41] InclusiveCacheControl ctrls ( // @[InclusiveCache.scala:103:43] .clock (clock), .reset (reset), .auto_ctrl_in_a_ready (auto_ctrls_ctrl_in_a_ready_0), .auto_ctrl_in_a_valid (auto_ctrls_ctrl_in_a_valid_0), // @[InclusiveCache.scala:108:9] .auto_ctrl_in_a_bits_opcode (auto_ctrls_ctrl_in_a_bits_opcode_0), // @[InclusiveCache.scala:108:9] .auto_ctrl_in_a_bits_param (auto_ctrls_ctrl_in_a_bits_param_0), // @[InclusiveCache.scala:108:9] .auto_ctrl_in_a_bits_size (auto_ctrls_ctrl_in_a_bits_size_0), // @[InclusiveCache.scala:108:9] .auto_ctrl_in_a_bits_source (auto_ctrls_ctrl_in_a_bits_source_0), // @[InclusiveCache.scala:108:9] .auto_ctrl_in_a_bits_address (auto_ctrls_ctrl_in_a_bits_address_0), // @[InclusiveCache.scala:108:9] .auto_ctrl_in_a_bits_mask (auto_ctrls_ctrl_in_a_bits_mask_0), // @[InclusiveCache.scala:108:9] .auto_ctrl_in_a_bits_data (auto_ctrls_ctrl_in_a_bits_data_0), // @[InclusiveCache.scala:108:9] .auto_ctrl_in_a_bits_corrupt (auto_ctrls_ctrl_in_a_bits_corrupt_0), // @[InclusiveCache.scala:108:9] .auto_ctrl_in_d_ready (auto_ctrls_ctrl_in_d_ready_0), // @[InclusiveCache.scala:108:9] .auto_ctrl_in_d_valid (auto_ctrls_ctrl_in_d_valid_0), .auto_ctrl_in_d_bits_opcode (auto_ctrls_ctrl_in_d_bits_opcode_0), .auto_ctrl_in_d_bits_size (auto_ctrls_ctrl_in_d_bits_size_0), .auto_ctrl_in_d_bits_source (auto_ctrls_ctrl_in_d_bits_source_0), .auto_ctrl_in_d_bits_data (auto_ctrls_ctrl_in_d_bits_data_0), .io_flush_match (contained), // @[InclusiveCache.scala:169:67] .io_flush_req_ready (contained & _inclusive_cache_bank_sched_io_req_ready), // @[InclusiveCache.scala:137:29, :169:67, :174:25] .io_flush_req_valid (_ctrls_io_flush_req_valid), .io_flush_req_bits (_ctrls_io_flush_req_bits), .io_flush_resp (_inclusive_cache_bank_sched_io_resp_valid) // @[InclusiveCache.scala:137:29] ); // @[InclusiveCache.scala:103:43] TLMonitor_36 monitor ( // @[Nodes.scala:27:25] .clock (clock), .reset (reset), .io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17] 
.io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17] .io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17] .io_in_a_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17] .io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17] .io_in_a_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17] .io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17] .io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17] .io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17] .io_in_a_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17] .io_in_b_ready (nodeIn_b_ready), // @[MixedNode.scala:551:17] .io_in_b_valid (nodeIn_b_valid), // @[MixedNode.scala:551:17] .io_in_b_bits_param (nodeIn_b_bits_param), // @[MixedNode.scala:551:17] .io_in_b_bits_address (nodeIn_b_bits_address), // @[MixedNode.scala:551:17] .io_in_c_ready (nodeIn_c_ready), // @[MixedNode.scala:551:17] .io_in_c_valid (nodeIn_c_valid), // @[MixedNode.scala:551:17] .io_in_c_bits_opcode (nodeIn_c_bits_opcode), // @[MixedNode.scala:551:17] .io_in_c_bits_param (nodeIn_c_bits_param), // @[MixedNode.scala:551:17] .io_in_c_bits_size (nodeIn_c_bits_size), // @[MixedNode.scala:551:17] .io_in_c_bits_source (nodeIn_c_bits_source), // @[MixedNode.scala:551:17] .io_in_c_bits_address (nodeIn_c_bits_address), // @[MixedNode.scala:551:17] .io_in_c_bits_data (nodeIn_c_bits_data), // @[MixedNode.scala:551:17] .io_in_c_bits_corrupt (nodeIn_c_bits_corrupt), // @[MixedNode.scala:551:17] .io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17] .io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17] .io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17] .io_in_d_bits_param (nodeIn_d_bits_param), // @[MixedNode.scala:551:17] .io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17] .io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17] .io_in_d_bits_sink (nodeIn_d_bits_sink), // @[MixedNode.scala:551:17] .io_in_d_bits_denied (nodeIn_d_bits_denied), // @[MixedNode.scala:551:17] .io_in_d_bits_data (nodeIn_d_bits_data), // @[MixedNode.scala:551:17] .io_in_d_bits_corrupt (nodeIn_d_bits_corrupt), // @[MixedNode.scala:551:17] .io_in_e_valid (nodeIn_e_valid), // @[MixedNode.scala:551:17] .io_in_e_bits_sink (nodeIn_e_bits_sink) // @[MixedNode.scala:551:17] ); // @[Nodes.scala:27:25] InclusiveCacheBankScheduler inclusive_cache_bank_sched ( // @[InclusiveCache.scala:137:29] .clock (clock), .reset (reset), .io_in_a_ready (nodeIn_a_ready), .io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17] .io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17] .io_in_a_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17] .io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17] .io_in_a_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17] .io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17] .io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17] .io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17] .io_in_a_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17] .io_in_b_ready (nodeIn_b_ready), // @[MixedNode.scala:551:17] .io_in_b_valid (nodeIn_b_valid), .io_in_b_bits_param (nodeIn_b_bits_param), .io_in_b_bits_address (_inclusive_cache_bank_sched_io_in_b_bits_address), .io_in_c_ready (nodeIn_c_ready), .io_in_c_valid (nodeIn_c_valid), // @[MixedNode.scala:551:17] .io_in_c_bits_opcode 
(nodeIn_c_bits_opcode), // @[MixedNode.scala:551:17] .io_in_c_bits_param (nodeIn_c_bits_param), // @[MixedNode.scala:551:17] .io_in_c_bits_size (nodeIn_c_bits_size), // @[MixedNode.scala:551:17] .io_in_c_bits_source (nodeIn_c_bits_source), // @[MixedNode.scala:551:17] .io_in_c_bits_address (nodeIn_c_bits_address), // @[MixedNode.scala:551:17] .io_in_c_bits_data (nodeIn_c_bits_data), // @[MixedNode.scala:551:17] .io_in_c_bits_corrupt (nodeIn_c_bits_corrupt), // @[MixedNode.scala:551:17] .io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17] .io_in_d_valid (nodeIn_d_valid), .io_in_d_bits_opcode (nodeIn_d_bits_opcode), .io_in_d_bits_param (nodeIn_d_bits_param), .io_in_d_bits_size (nodeIn_d_bits_size), .io_in_d_bits_source (nodeIn_d_bits_source), .io_in_d_bits_sink (nodeIn_d_bits_sink), .io_in_d_bits_denied (nodeIn_d_bits_denied), .io_in_d_bits_data (nodeIn_d_bits_data), .io_in_d_bits_corrupt (nodeIn_d_bits_corrupt), .io_in_e_valid (nodeIn_e_valid), // @[MixedNode.scala:551:17] .io_in_e_bits_sink (nodeIn_e_bits_sink), // @[MixedNode.scala:551:17] .io_out_a_ready (nodeOut_a_ready), // @[MixedNode.scala:542:17] .io_out_a_valid (nodeOut_a_valid), .io_out_a_bits_opcode (nodeOut_a_bits_opcode), .io_out_a_bits_param (nodeOut_a_bits_param), .io_out_a_bits_size (nodeOut_a_bits_size), .io_out_a_bits_source (nodeOut_a_bits_source), .io_out_a_bits_address (_inclusive_cache_bank_sched_io_out_a_bits_address), .io_out_a_bits_mask (nodeOut_a_bits_mask), .io_out_a_bits_data (nodeOut_a_bits_data), .io_out_a_bits_corrupt (nodeOut_a_bits_corrupt), .io_out_c_ready (nodeOut_c_ready), // @[MixedNode.scala:542:17] .io_out_c_valid (nodeOut_c_valid), .io_out_c_bits_opcode (nodeOut_c_bits_opcode), .io_out_c_bits_param (nodeOut_c_bits_param), .io_out_c_bits_size (nodeOut_c_bits_size), .io_out_c_bits_source (nodeOut_c_bits_source), .io_out_c_bits_address (_inclusive_cache_bank_sched_io_out_c_bits_address), .io_out_c_bits_data (nodeOut_c_bits_data), .io_out_c_bits_corrupt (nodeOut_c_bits_corrupt), .io_out_d_ready (nodeOut_d_ready), .io_out_d_valid (nodeOut_d_valid), // @[MixedNode.scala:542:17] .io_out_d_bits_opcode (nodeOut_d_bits_opcode), // @[MixedNode.scala:542:17] .io_out_d_bits_param (nodeOut_d_bits_param), // @[MixedNode.scala:542:17] .io_out_d_bits_size (nodeOut_d_bits_size), // @[MixedNode.scala:542:17] .io_out_d_bits_source (nodeOut_d_bits_source), // @[MixedNode.scala:542:17] .io_out_d_bits_sink (nodeOut_d_bits_sink), // @[MixedNode.scala:542:17] .io_out_d_bits_denied (nodeOut_d_bits_denied), // @[MixedNode.scala:542:17] .io_out_d_bits_data (nodeOut_d_bits_data), // @[MixedNode.scala:542:17] .io_out_d_bits_corrupt (nodeOut_d_bits_corrupt), // @[MixedNode.scala:542:17] .io_out_e_valid (nodeOut_e_valid), .io_out_e_bits_sink (nodeOut_e_bits_sink), .io_req_ready (_inclusive_cache_bank_sched_io_req_ready), .io_req_valid (_inclusive_cache_bank_sched_io_req_valid_T), // @[InclusiveCache.scala:172:41] .io_req_bits_address (_ctrls_io_flush_req_bits[31:0]), // @[Parameters.scala:137:31] .io_resp_valid (_inclusive_cache_bank_sched_io_resp_valid) ); // @[InclusiveCache.scala:137:29] assign _nodeOut_a_bits_address_mux_matches_T = _inclusive_cache_bank_sched_io_out_a_bits_address; // @[Parameters.scala:137:31] assign _nodeOut_a_bits_address_T = _inclusive_cache_bank_sched_io_out_a_bits_address; // @[Parameters.scala:248:14] assign _nodeIn_b_bits_address_mux_matches_T = _inclusive_cache_bank_sched_io_in_b_bits_address; // @[Parameters.scala:137:31] assign _nodeIn_b_bits_address_T = 
_inclusive_cache_bank_sched_io_in_b_bits_address; // @[Parameters.scala:248:14] assign _nodeOut_c_bits_address_mux_matches_T = _inclusive_cache_bank_sched_io_out_c_bits_address; // @[Parameters.scala:137:31] assign _nodeOut_c_bits_address_T = _inclusive_cache_bank_sched_io_out_c_bits_address; // @[Parameters.scala:248:14] assign auto_ctrls_ctrl_in_a_ready = auto_ctrls_ctrl_in_a_ready_0; // @[InclusiveCache.scala:108:9] assign auto_ctrls_ctrl_in_d_valid = auto_ctrls_ctrl_in_d_valid_0; // @[InclusiveCache.scala:108:9] assign auto_ctrls_ctrl_in_d_bits_opcode = auto_ctrls_ctrl_in_d_bits_opcode_0; // @[InclusiveCache.scala:108:9] assign auto_ctrls_ctrl_in_d_bits_size = auto_ctrls_ctrl_in_d_bits_size_0; // @[InclusiveCache.scala:108:9] assign auto_ctrls_ctrl_in_d_bits_source = auto_ctrls_ctrl_in_d_bits_source_0; // @[InclusiveCache.scala:108:9] assign auto_ctrls_ctrl_in_d_bits_data = auto_ctrls_ctrl_in_d_bits_data_0; // @[InclusiveCache.scala:108:9] assign auto_in_a_ready = auto_in_a_ready_0; // @[InclusiveCache.scala:108:9] assign auto_in_b_valid = auto_in_b_valid_0; // @[InclusiveCache.scala:108:9] assign auto_in_b_bits_param = auto_in_b_bits_param_0; // @[InclusiveCache.scala:108:9] assign auto_in_b_bits_address = auto_in_b_bits_address_0; // @[InclusiveCache.scala:108:9] assign auto_in_c_ready = auto_in_c_ready_0; // @[InclusiveCache.scala:108:9] assign auto_in_d_valid = auto_in_d_valid_0; // @[InclusiveCache.scala:108:9] assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[InclusiveCache.scala:108:9] assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[InclusiveCache.scala:108:9] assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[InclusiveCache.scala:108:9] assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[InclusiveCache.scala:108:9] assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[InclusiveCache.scala:108:9] assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[InclusiveCache.scala:108:9] assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[InclusiveCache.scala:108:9] assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[InclusiveCache.scala:108:9] assign auto_out_a_valid = auto_out_a_valid_0; // @[InclusiveCache.scala:108:9] assign auto_out_a_bits_opcode = auto_out_a_bits_opcode_0; // @[InclusiveCache.scala:108:9] assign auto_out_a_bits_param = auto_out_a_bits_param_0; // @[InclusiveCache.scala:108:9] assign auto_out_a_bits_size = auto_out_a_bits_size_0; // @[InclusiveCache.scala:108:9] assign auto_out_a_bits_source = auto_out_a_bits_source_0; // @[InclusiveCache.scala:108:9] assign auto_out_a_bits_address = auto_out_a_bits_address_0; // @[InclusiveCache.scala:108:9] assign auto_out_a_bits_mask = auto_out_a_bits_mask_0; // @[InclusiveCache.scala:108:9] assign auto_out_a_bits_data = auto_out_a_bits_data_0; // @[InclusiveCache.scala:108:9] assign auto_out_a_bits_corrupt = auto_out_a_bits_corrupt_0; // @[InclusiveCache.scala:108:9] assign auto_out_c_valid = auto_out_c_valid_0; // @[InclusiveCache.scala:108:9] assign auto_out_c_bits_opcode = auto_out_c_bits_opcode_0; // @[InclusiveCache.scala:108:9] assign auto_out_c_bits_param = auto_out_c_bits_param_0; // @[InclusiveCache.scala:108:9] assign auto_out_c_bits_size = auto_out_c_bits_size_0; // @[InclusiveCache.scala:108:9] assign auto_out_c_bits_source = auto_out_c_bits_source_0; // @[InclusiveCache.scala:108:9] assign auto_out_c_bits_address = auto_out_c_bits_address_0; // @[InclusiveCache.scala:108:9] assign auto_out_c_bits_data = auto_out_c_bits_data_0; // 
@[InclusiveCache.scala:108:9] assign auto_out_c_bits_corrupt = auto_out_c_bits_corrupt_0; // @[InclusiveCache.scala:108:9] assign auto_out_d_ready = auto_out_d_ready_0; // @[InclusiveCache.scala:108:9] assign auto_out_e_valid = auto_out_e_valid_0; // @[InclusiveCache.scala:108:9] assign auto_out_e_bits_sink = auto_out_e_bits_sink_0; // @[InclusiveCache.scala:108:9] endmodule
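As an aside, the 'contained' wires above implement the standard aligned-region membership test ((addr ^ base) & ~(size - 1)) == 0 for what appear to be the two address regions served by this cache bank. The plain-Scala model below is only an illustration of that arithmetic; the bases and sizes (256 MiB at 0x8000_0000 and 64 KiB at 0x0800_0000) are inferred from the generated XOR constants and masks, not quoted from the Chisel source.

// Software model of the aligned-region containment test used by the
// 'contained' logic: ((addr ^ base) & ~(size - 1)) == 0.
object ContainedModel extends App {
  def contained(addr: BigInt, base: BigInt, size: BigInt): Boolean =
    ((addr ^ base) & ~(size - 1)) == 0

  // Region inferred from mask 65'h1FFFFFFFFF0000000: 256 MiB at 0x8000_0000.
  println(contained(BigInt("80001000", 16), BigInt("80000000", 16), BigInt(1) << 28)) // true
  // Region inferred from mask 65'h1FFFFFFFFFFFF0000: 64 KiB at 0x0800_0000.
  println(contained(BigInt("08000040", 16), BigInt("08000000", 16), BigInt(1) << 16)) // true
  println(contained(BigInt("70000000", 16), BigInt("80000000", 16), BigInt(1) << 28)) // false
}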
Generate the Verilog code corresponding to the following Chisel files.
File SinkE.scala:
/*
 * Copyright 2019 SiFive, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You should have received a copy of LICENSE.Apache2 along with
 * this software. If not, you may obtain a copy at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package sifive.blocks.inclusivecache

import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink._

class SinkEResponse(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) {
  val sink = UInt(params.inner.bundle.sinkBits.W)
}

class SinkE(params: InclusiveCacheParameters) extends Module {
  val io = IO(new Bundle {
    val resp = Valid(new SinkEResponse(params))
    val e = Flipped(Decoupled(new TLBundleE(params.inner.bundle)))
  })

  if (params.firstLevel) {
    // Tie off unused ports
    io.resp.valid := false.B
    io.resp.bits := DontCare
    io.e.ready := true.B
  } else {
    // No restrictions on buffer
    val e = params.micro.innerBuf.e(io.e)

    e.ready := true.B
    io.resp.valid := e.valid
    io.resp.bits.sink := e.bits.sink
  }
}
module SinkE_1( // @[SinkE.scala:29:7]
  input        clock, // @[SinkE.scala:29:7]
  input        reset, // @[SinkE.scala:29:7]
  output       io_resp_valid, // @[SinkE.scala:31:14]
  output [3:0] io_resp_bits_sink, // @[SinkE.scala:31:14]
  input        io_e_valid, // @[SinkE.scala:31:14]
  input  [3:0] io_e_bits_sink // @[SinkE.scala:31:14]
);

  wire       io_e_valid_0 = io_e_valid; // @[SinkE.scala:29:7]
  wire [3:0] io_e_bits_sink_0 = io_e_bits_sink; // @[SinkE.scala:29:7]
  wire       io_e_ready = 1'h1; // @[SinkE.scala:29:7]
  wire       io_resp_valid_0 = io_e_valid_0; // @[SinkE.scala:29:7]
  wire [3:0] io_resp_bits_sink_0 = io_e_bits_sink_0; // @[SinkE.scala:29:7]
  assign io_resp_valid = io_resp_valid_0; // @[SinkE.scala:29:7]
  assign io_resp_bits_sink = io_resp_bits_sink_0; // @[SinkE.scala:29:7]
endmodule
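A quick note on the result: SinkE_1 above contains no state because the non-firstLevel branch is taken and, evidently, the inner E-channel buffer is configured as a pass-through in this instance, so every E beat is accepted immediately and forwarded as a response. A minimal stand-alone Chisel sketch of that datapath (illustrative only; the port names are simplified rather than the real TileLink bundle):

import chisel3._

class SinkESketch(sinkBits: Int) extends Module {
  val io = IO(new Bundle {
    val e_valid    = Input(Bool())
    val e_sink     = Input(UInt(sinkBits.W))
    val e_ready    = Output(Bool())
    val resp_valid = Output(Bool())
    val resp_sink  = Output(UInt(sinkBits.W))
  })
  io.e_ready    := true.B     // E beats are always accepted
  io.resp_valid := io.e_valid // a response fires for every E beat
  io.resp_sink  := io.e_sink  // the sink ID is forwarded unchanged
}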
Generate the Verilog code corresponding to the following Chisel files. File UnsafeAXI4ToTL.scala: package ara import chisel3._ import chisel3.util._ import freechips.rocketchip.amba._ import freechips.rocketchip.amba.axi4._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle { val data = UInt(dataWidth.W) val resp = UInt(respWidth.W) val last = Bool() val user = BundleMap(userFields) } /** Parameters for [[BaseReservableListBuffer]] and all child classes. * * @param numEntries Total number of elements that can be stored in the 'data' RAM * @param numLists Maximum number of linked lists * @param numBeats Maximum number of beats per entry */ case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) { // Avoid zero-width wires when we call 'log2Ceil' val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries) val listBits = if (numLists == 1) 1 else log2Ceil(numLists) val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats) } case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName) extends MixedAdapterNode(AXI4Imp, TLImp)( dFn = { case mp => TLMasterPortParameters.v2( masters = mp.masters.zipWithIndex.map { case (m, i) => // Support 'numTlTxns' read requests and 'numTlTxns' write requests at once. val numSourceIds = numTlTxns * 2 TLMasterParameters.v2( name = m.name, sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds), nodePath = m.nodePath ) }, echoFields = mp.echoFields, requestFields = AMBAProtField() +: mp.requestFields, responseKeys = mp.responseKeys ) }, uFn = { mp => AXI4SlavePortParameters( slaves = mp.managers.map { m => val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits)) AXI4SlaveParameters( address = m.address, resources = m.resources, regionType = m.regionType, executable = m.executable, nodePath = m.nodePath, supportsWrite = m.supportsPutPartial.intersect(maxXfer), supportsRead = m.supportsGet.intersect(maxXfer), interleavedId = Some(0) // TL2 never interleaves D beats ) }, beatBytes = mp.beatBytes, minLatency = mp.minLatency, responseFields = mp.responseFields, requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt) ) } ) class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule { require(numTlTxns >= 1) require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2") val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => edgeIn.master.masters.foreach { m => require(m.aligned, "AXI4ToTL requires aligned requests") } val numIds = edgeIn.master.endId val beatBytes = edgeOut.slave.beatBytes val maxTransfer = edgeOut.slave.maxTransfer val maxBeats = maxTransfer / beatBytes // Look for an Error device to redirect bad requests val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError") require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. 
One must be instantiated.") val errorDev = errorDevs.maxBy(_.maxTransfer) val errorDevAddr = errorDev.address.head.base require( errorDev.supportsPutPartial.contains(maxTransfer), s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer" ) require( errorDev.supportsGet.contains(maxTransfer), s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer" ) // All of the read-response reordering logic. val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields) val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats) val listBuffer = if (numTlTxns > 1) { Module(new ReservableListBuffer(listBufData, listBufParams)) } else { Module(new PassthroughListBuffer(listBufData, listBufParams)) } // To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to // 0 for read requests and 1 for write requests. val isReadSourceBit = 0.U(1.W) val isWriteSourceBit = 1.U(1.W) /* Read request logic */ val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val rBytes1 = in.ar.bits.bytes1() val rSize = OH1ToUInt(rBytes1) val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize) val rId = if (numTlTxns > 1) { Cat(isReadSourceBit, listBuffer.ioReservedIndex) } else { isReadSourceBit } val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Indicates if there are still valid TileLink source IDs left to use. val canIssueR = listBuffer.ioReserve.ready listBuffer.ioReserve.bits := in.ar.bits.id listBuffer.ioReserve.valid := in.ar.valid && rOut.ready in.ar.ready := rOut.ready && canIssueR rOut.valid := in.ar.valid && canIssueR rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2 rOut.bits.user :<= in.ar.bits.user rOut.bits.user.lift(AMBAProt).foreach { rProt => rProt.privileged := in.ar.bits.prot(0) rProt.secure := !in.ar.bits.prot(1) rProt.fetch := in.ar.bits.prot(2) rProt.bufferable := in.ar.bits.cache(0) rProt.modifiable := in.ar.bits.cache(1) rProt.readalloc := in.ar.bits.cache(2) rProt.writealloc := in.ar.bits.cache(3) } /* Write request logic */ // Strip off the MSB, which identifies the transaction as read vs write. val strippedResponseSourceId = if (numTlTxns > 1) { out.d.bits.source((out.d.bits.source).getWidth - 2, 0) } else { // When there's only 1 TileLink transaction allowed for read/write, then this field is always 0. 0.U(1.W) } // Track when a write request burst is in progress. val writeBurstBusy = RegInit(false.B) when(in.w.fire) { writeBurstBusy := !in.w.bits.last } val usedWriteIds = RegInit(0.U(numTlTxns.W)) val canIssueW = !usedWriteIds.andR val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W)) val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W)) usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet // Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't // change mid-burst. 
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W)) val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy val freeWriteIdIndex = OHToUInt(freeWriteIdOH) freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val wBytes1 = in.aw.bits.bytes1() val wSize = OH1ToUInt(wBytes1) val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize) val wId = if (numTlTxns > 1) { Cat(isWriteSourceBit, freeWriteIdIndex) } else { isWriteSourceBit } val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain // asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but // the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb // bits during a W-channel burst. in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW in.w.ready := wOut.ready && in.aw.valid && canIssueW wOut.valid := in.aw.valid && in.w.valid && canIssueW wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2 in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ } wOut.bits.user :<= in.aw.bits.user wOut.bits.user.lift(AMBAProt).foreach { wProt => wProt.privileged := in.aw.bits.prot(0) wProt.secure := !in.aw.bits.prot(1) wProt.fetch := in.aw.bits.prot(2) wProt.bufferable := in.aw.bits.cache(0) wProt.modifiable := in.aw.bits.cache(1) wProt.readalloc := in.aw.bits.cache(2) wProt.writealloc := in.aw.bits.cache(3) } // Merge the AXI4 read/write requests into the TL-A channel. TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut)) /* Read/write response logic */ val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle))) val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle))) val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY) val dHasData = edgeOut.hasData(out.d.bits) val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d) val dNumBeats1 = edgeOut.numBeats1(out.d.bits) // Handle cases where writeack arrives before write is done val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck) listBuffer.ioDataOut.ready := okR.ready okR.valid := listBuffer.ioDataOut.valid okB.valid := out.d.valid && !dHasData && !writeEarlyAck listBuffer.ioResponse.valid := out.d.valid && dHasData listBuffer.ioResponse.bits.index := strippedResponseSourceId listBuffer.ioResponse.bits.data.data := out.d.bits.data listBuffer.ioResponse.bits.data.resp := dResp listBuffer.ioResponse.bits.data.last := dLast listBuffer.ioResponse.bits.data.user :<= out.d.bits.user listBuffer.ioResponse.bits.count := dCount listBuffer.ioResponse.bits.numBeats1 := dNumBeats1 okR.bits.id := listBuffer.ioDataOut.bits.listIndex okR.bits.data := listBuffer.ioDataOut.bits.payload.data okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp okR.bits.last := listBuffer.ioDataOut.bits.payload.last okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user // Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write // response, mark the write transaction as complete. 
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W)) val writeResponseId = writeIdMap.read(strippedResponseSourceId) when(wOut.fire) { writeIdMap.write(freeWriteIdIndex, in.aw.bits.id) } when(edgeOut.done(wOut)) { usedWriteIdsSet := freeWriteIdOH } when(okB.fire) { usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns) } okB.bits.id := writeResponseId okB.bits.resp := dResp okB.bits.user :<= out.d.bits.user // AXI4 needs irrevocable behaviour in.r <> Queue.irrevocable(okR, 1, flow = true) in.b <> Queue.irrevocable(okB, 1, flow = true) // Unused channels out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B /* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */ def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = { val lReqType = reqType.toLowerCase when(a.valid) { assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U) // Narrow transfers and FIXED bursts must be single-beat bursts. when(a.bits.len =/= 0.U) { assert( a.bits.size === log2Ceil(beatBytes).U, s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)", 1.U << a.bits.size, a.bits.len + 1.U ) assert( a.bits.burst =/= AXI4Parameters.BURST_FIXED, s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)", a.bits.len + 1.U ) } // Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in // particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink // Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts. } } checkRequest(in.ar, "Read") checkRequest(in.aw, "Write") } } } object UnsafeAXI4ToTL { def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = { val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt)) axi42tl.node } } /* ReservableListBuffer logic, and associated classes. */ class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle { val index = UInt(params.entryBits.W) val count = UInt(params.beatBits.W) val numBeats1 = UInt(params.beatBits.W) } class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle { val listIndex = UInt(params.listBits.W) } /** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */ abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends Module { require(params.numEntries > 0) require(params.numLists > 0) val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W)))) val ioReservedIndex = IO(Output(UInt(params.entryBits.W))) val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params)))) val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params))) } /** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve * linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the * 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a * given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order. 
* * ==Constructor== * @param gen Chisel type of linked list data element * @param params Other parameters * * ==Module IO== * @param ioReserve Index of list to reserve a new element in * @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire' * @param ioResponse Payload containing response data and linked-list-entry index * @param ioDataOut Payload containing data read from response linked list and linked list index */ class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { val valid = RegInit(0.U(params.numLists.W)) val head = Mem(params.numLists, UInt(params.entryBits.W)) val tail = Mem(params.numLists, UInt(params.entryBits.W)) val used = RegInit(0.U(params.numEntries.W)) val next = Mem(params.numEntries, UInt(params.entryBits.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) } val dataIsPresent = RegInit(0.U(params.numEntries.W)) val beats = Mem(params.numEntries, UInt(params.beatBits.W)) // The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower. val dataMemReadEnable = WireDefault(false.B) val dataMemWriteEnable = WireDefault(false.B) assert(!(dataMemReadEnable && dataMemWriteEnable)) // 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the // lowest-index entry in the 'data' RAM which is free. val freeOH = Wire(UInt(params.numEntries.W)) val freeIndex = OHToUInt(freeOH) freeOH := ~(leftOR(~used) << 1) & ~used ioReservedIndex := freeIndex val validSet = WireDefault(0.U(params.numLists.W)) val validClr = WireDefault(0.U(params.numLists.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) val dataIsPresentSet = WireDefault(0.U(params.numEntries.W)) val dataIsPresentClr = WireDefault(0.U(params.numEntries.W)) valid := (valid & ~validClr) | validSet used := (used & ~usedClr) | usedSet dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet /* Reservation logic signals */ val reserveTail = Wire(UInt(params.entryBits.W)) val reserveIsValid = Wire(Bool()) /* Response logic signals */ val responseIndex = Wire(UInt(params.entryBits.W)) val responseListIndex = Wire(UInt(params.listBits.W)) val responseHead = Wire(UInt(params.entryBits.W)) val responseTail = Wire(UInt(params.entryBits.W)) val nextResponseHead = Wire(UInt(params.entryBits.W)) val nextDataIsPresent = Wire(Bool()) val isResponseInOrder = Wire(Bool()) val isEndOfList = Wire(Bool()) val isLastBeat = Wire(Bool()) val isLastResponseBeat = Wire(Bool()) val isLastUnwindBeat = Wire(Bool()) /* Reservation logic */ reserveTail := tail.read(ioReserve.bits) reserveIsValid := valid(ioReserve.bits) ioReserve.ready := !used.andR // When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we // actually start a new list, rather than appending to a list that's about to disappear. 
val reserveResponseSameList = ioReserve.bits === responseListIndex val appendToAndDestroyList = ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat when(ioReserve.fire) { validSet := UIntToOH(ioReserve.bits, params.numLists) usedSet := freeOH when(reserveIsValid && !appendToAndDestroyList) { next.write(reserveTail, freeIndex) }.otherwise { head.write(ioReserve.bits, freeIndex) } tail.write(ioReserve.bits, freeIndex) map.write(freeIndex, ioReserve.bits) } /* Response logic */ // The majority of the response logic (reading from and writing to the various RAMs) is common between the // response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid). // The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the // 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and // response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after // two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker. responseHead := head.read(responseListIndex) responseTail := tail.read(responseListIndex) nextResponseHead := next.read(responseIndex) nextDataIsPresent := dataIsPresent(nextResponseHead) // Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since // there isn't a next element in the linked list. isResponseInOrder := responseHead === responseIndex isEndOfList := responseHead === responseTail isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1 // When a response's last beat is sent to the output channel, mark it as completed. This can happen in two // situations: // 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM // reservation was never needed. // 2. An entry is read out of the 'data' SRAM (within the unwind FSM). when(ioDataOut.fire && isLastBeat) { // Mark the reservation as no-longer-used. usedClr := UIntToOH(responseIndex, params.numEntries) // If the response is in-order, then we're popping an element from this linked list. when(isEndOfList) { // Once we pop the last element from a linked list, mark it as no-longer-present. validClr := UIntToOH(responseListIndex, params.numLists) }.otherwise { // Move the linked list's head pointer to the new head pointer. head.write(responseListIndex, nextResponseHead) } } // If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding. when(ioResponse.fire && !isResponseInOrder) { dataMemWriteEnable := true.B when(isLastResponseBeat) { dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries) beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1) } } // Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to. 
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats) (responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) => when(select && dataMemWriteEnable) { seqMem.write(ioResponse.bits.index, ioResponse.bits.data) } } /* Response unwind logic */ // Unwind FSM state definitions val sIdle :: sUnwinding :: Nil = Enum(2) val unwindState = RegInit(sIdle) val busyUnwinding = unwindState === sUnwinding val startUnwind = Wire(Bool()) val stopUnwind = Wire(Bool()) when(startUnwind) { unwindState := sUnwinding }.elsewhen(stopUnwind) { unwindState := sIdle } assert(!(startUnwind && stopUnwind)) // Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to // become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is // invalid. // // Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to // worry about overwriting the 'data' SRAM's output when we start the unwind FSM. startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent // Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of // two things happens: // 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent) // 2. There are no more outstanding responses in this list (isEndOfList) // // Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are // passing from 'ioResponse' to 'ioDataOut'. stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList) val isUnwindBurstOver = Wire(Bool()) val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable) // Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of // beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we // increment 'beatCounter' until it reaches 'unwindBeats1'. val unwindBeats1 = Reg(UInt(params.beatBits.W)) val nextBeatCounter = Wire(UInt(params.beatBits.W)) val beatCounter = RegNext(nextBeatCounter) isUnwindBurstOver := beatCounter === unwindBeats1 when(startNewBurst) { unwindBeats1 := beats.read(nextResponseHead) nextBeatCounter := 0.U }.elsewhen(dataMemReadEnable) { nextBeatCounter := beatCounter + 1.U }.otherwise { nextBeatCounter := beatCounter } // When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next // entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which // happens at the start of reading a new stored burst). val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst) responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index) // Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the // SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead // holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'. val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex) // The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. 
The SRAM's output stays valid // until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle). val unwindDataIsValid = RegInit(false.B) when(dataMemReadEnable) { unwindDataIsValid := true.B }.elsewhen(ioDataOut.fire) { unwindDataIsValid := false.B } isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid // Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats. isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat) // Select which SRAM to read from based on the beat counter. val dataOutputVec = Wire(Vec(params.numBeats, gen)) val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats) (nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) => dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable) } // Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured // by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading // from. val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable) // Mark 'data' burst entries as no-longer-present as they get read out of the SRAM. when(dataMemReadEnable) { dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries) } // As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue // a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know // we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be // consumed by the output channel). val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem) // While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need // 'responseListIndex' to be coherent for the entire unwind process. val rawResponseListIndex = map.read(responseIndex) val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst) responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex) // Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are // just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that // could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be // single-ported. ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding // Either pass an in-order response to the output channel, or data read from the unwind FSM. ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder) ioDataOut.bits.listIndex := responseListIndex ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data) // It's an error to get a response that isn't associated with a valid linked list. when(ioResponse.fire || unwindDataIsValid) { assert( valid(responseListIndex), "No linked list exists at index %d, mapped from %d", responseListIndex, responseIndex ) } when(busyUnwinding && dataMemReadEnable) { assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order") } } /** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1. 
* * Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to * reorder any responses, or store any linked lists. */ class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1") val used = RegInit(0.U(params.numEntries.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) used := (used & ~usedClr) | usedSet ioReserve.ready := used === 0.U // Store which list index was reserved, we need to return this value when we get a response. when(ioReserve.fire) { usedSet := 1.U map.write(0.U, ioReserve.bits) } // There's only one valid linked list entry, which is at index 0. ioReservedIndex := 0.U val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1 // Mark the linked list as empty when we get the last beat in a response. // Note that 'ioResponse.fire === ioDataOut.fire'. when(ioResponse.fire && isLastResponseBeat) { usedClr := 1.U } // Always pass the response data straight through, since we never need to reorder the response data. ioDataOut.bits.listIndex := map.read(0.U) ioDataOut.bits.payload := ioResponse.bits.data ioDataOut.valid := ioResponse.valid ioResponse.ready := ioDataOut.ready }
module dataMems_400( // @[UnsafeAXI4ToTL.scala:365:62]
  input  [4:0]  R0_addr,
  input         R0_en,
  input         R0_clk,
  output [66:0] R0_data,
  input  [4:0]  W0_addr,
  input         W0_en,
  input         W0_clk,
  input  [66:0] W0_data
);

  dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
    .R0_addr (R0_addr),
    .R0_en   (R0_en),
    .R0_clk  (R0_clk),
    .R0_data (R0_data),
    .W0_addr (W0_addr),
    .W0_en   (W0_en),
    .W0_clk  (W0_clk),
    .W0_data (W0_data)
  ); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule
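Worth noting: the write-ID allocator (freeWriteIdOHRaw) in UnsafeAXI4ToTL and the freeOH signal in ReservableListBuffer both pick the lowest free entry of a 'used' bitmap with the same expression, ~(leftOR(~used) << 1) & ~used. The plain-Scala model below (names and widths invented for the example) reproduces that bit trick in software to show what it computes.

object FreeEntryModel extends App {
  // leftOR fills 1s upward from every set bit, like util.leftOR.
  def leftOR(x: Int, width: Int): Int = {
    var r = x & ((1 << width) - 1)
    var s = 1
    while (s < width) { r |= (r << s) & ((1 << width) - 1); s <<= 1 }
    r
  }
  // One-hot of the lowest clear bit of 'used', i.e. the free slot with the smallest index.
  def freeOH(used: Int, width: Int): Int = {
    val mask = (1 << width) - 1
    ~(leftOR(~used & mask, width) << 1) & ~used & mask
  }
  println(freeOH(used = 0x07, width = 8).toBinaryString) // "1000": entry 3 is the lowest free slot
  println(freeOH(used = 0x05, width = 8).toBinaryString) // "10":   entry 1 is the lowest free slot
}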
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag }
module OptimizationBarrier_TLBEntryData_11( // @[package.scala:267:30] input clock, // @[package.scala:267:30] input reset, // @[package.scala:267:30] input [19:0] io_x_ppn, // @[package.scala:268:18] input io_x_u, // @[package.scala:268:18] input io_x_g, // @[package.scala:268:18] input io_x_ae_ptw, // @[package.scala:268:18] input io_x_ae_final, // @[package.scala:268:18] input io_x_ae_stage2, // @[package.scala:268:18] input io_x_pf, // @[package.scala:268:18] input io_x_gf, // @[package.scala:268:18] input io_x_sw, // @[package.scala:268:18] input io_x_sx, // @[package.scala:268:18] input io_x_sr, // @[package.scala:268:18] input io_x_hw, // @[package.scala:268:18] input io_x_hx, // @[package.scala:268:18] input io_x_hr, // @[package.scala:268:18] input io_x_pw, // @[package.scala:268:18] input io_x_px, // @[package.scala:268:18] input io_x_pr, // @[package.scala:268:18] input io_x_ppp, // @[package.scala:268:18] input io_x_pal, // @[package.scala:268:18] input io_x_paa, // @[package.scala:268:18] input io_x_eff, // @[package.scala:268:18] input io_x_c, // @[package.scala:268:18] input io_x_fragmented_superpage, // @[package.scala:268:18] output io_y_u, // @[package.scala:268:18] output io_y_ae_ptw, // @[package.scala:268:18] output io_y_ae_final, // @[package.scala:268:18] output io_y_ae_stage2, // @[package.scala:268:18] output io_y_pf, // @[package.scala:268:18] output io_y_gf, // @[package.scala:268:18] output io_y_sw, // @[package.scala:268:18] output io_y_sx, // @[package.scala:268:18] output io_y_sr, // @[package.scala:268:18] output io_y_hw, // @[package.scala:268:18] output io_y_hx, // @[package.scala:268:18] output io_y_hr, // @[package.scala:268:18] output io_y_pw, // @[package.scala:268:18] output io_y_px, // @[package.scala:268:18] output io_y_pr, // @[package.scala:268:18] output io_y_ppp, // @[package.scala:268:18] output io_y_pal, // @[package.scala:268:18] output io_y_paa, // @[package.scala:268:18] output io_y_eff, // @[package.scala:268:18] output io_y_c // @[package.scala:268:18] ); wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30] wire io_x_u_0 = io_x_u; // @[package.scala:267:30] wire io_x_g_0 = io_x_g; // @[package.scala:267:30] wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30] wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30] wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30] wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30] wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30] wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30] wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30] wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30] wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30] wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30] wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30] wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30] wire io_x_px_0 = io_x_px; // @[package.scala:267:30] wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30] wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30] wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30] wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30] wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30] wire io_x_c_0 = io_x_c; // @[package.scala:267:30] wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30] wire [19:0] io_y_ppn = io_x_ppn_0; // @[package.scala:267:30] wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30] wire io_y_g = io_x_g_0; // @[package.scala:267:30] wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // 
@[package.scala:267:30] wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30] wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30] wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30] wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30] wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30] wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30] wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30] wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30] wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30] wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30] wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30] wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30] wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30] wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30] wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30] wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30] wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30] wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30] wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30] assign io_y_u = io_y_u_0; // @[package.scala:267:30] assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30] assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30] assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30] assign io_y_pf = io_y_pf_0; // @[package.scala:267:30] assign io_y_gf = io_y_gf_0; // @[package.scala:267:30] assign io_y_sw = io_y_sw_0; // @[package.scala:267:30] assign io_y_sx = io_y_sx_0; // @[package.scala:267:30] assign io_y_sr = io_y_sr_0; // @[package.scala:267:30] assign io_y_hw = io_y_hw_0; // @[package.scala:267:30] assign io_y_hx = io_y_hx_0; // @[package.scala:267:30] assign io_y_hr = io_y_hr_0; // @[package.scala:267:30] assign io_y_pw = io_y_pw_0; // @[package.scala:267:30] assign io_y_px = io_y_px_0; // @[package.scala:267:30] assign io_y_pr = io_y_pr_0; // @[package.scala:267:30] assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30] assign io_y_pal = io_y_pal_0; // @[package.scala:267:30] assign io_y_paa = io_y_paa_0; // @[package.scala:267:30] assign io_y_eff = io_y_eff_0; // @[package.scala:267:30] assign io_y_c = io_y_c_0; // @[package.scala:267:30] endmodule
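The OH1 helpers defined in package.scala (UIntToOH1, OH1ToOH, OH1ToUInt) convert between a count, its thermometer mask, and a one-hot index. The small plain-Scala model below is only a worked example of those three conversions (the object name is invented for the illustration).

object OH1Model extends App {
  // UIntToOH1(x, w): thermometer mask with the low x bits set.
  def uintToOH1(x: Int, width: Int): Int = ((1 << x) - 1) & ((1 << width) - 1)
  // OH1ToOH: (x << 1 | 1) & ~x turns that mask into a one-hot of bit x.
  def oh1ToOH(x: Int): Int = ((x << 1) | 1) & ~x
  // OH1ToUInt: index of the single bit set by OH1ToOH.
  def oh1ToUInt(x: Int): Int = Integer.numberOfTrailingZeros(oh1ToOH(x))

  val t = uintToOH1(3, 8) // 0b00000111
  println(s"thermometer=${t.toBinaryString} oneHot=${oh1ToOH(t).toBinaryString} index=${oh1ToUInt(t)}")
}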
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
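Before the generated module below, a brief aside on the arithmetic that shapes the PE's output path: in the output-stationary case, io.out_c is produced by a rounding right-shift of the accumulator (SIntArithmetic.>>) followed by a saturating resize to outputType. The sketch below is a plain-Scala behavioral model of the rounding shift only; the names RoundingShiftModel and roundingShift are illustrative and not part of the gemmini sources, and the rounding recipe is taken from the vxrm-style expression in SIntArithmetic.>> (round up when the dropped bits exceed one half; on an exact half, the kept LSB decides).

// Plain-Scala behavioral model (illustration only, not from the repository) of the
// rounding right-shift implemented by SIntArithmetic.>> above.
object RoundingShiftModel {
  def roundingShift(self: BigInt, u: Int): BigInt = {
    if (u == 0) return self
    val pointFive = self.testBit(u - 1)                        // bit just below the cut
    val zeros     = (self & ((BigInt(1) << (u - 1)) - 1)) != 0 // any lower dropped bits set?
    val onesDigit = self.testBit(u)                            // LSB of the truncated result
    val r = if (pointFive && (zeros || onesDigit)) BigInt(1) else BigInt(0)
    (self >> u) + r                                            // arithmetic shift, then round
  }

  def main(args: Array[String]): Unit = {
    // 13/4 = 3.25 -> 3, 14/4 = 3.5 -> 4, 15/4 = 3.75 -> 4, -13/4 = -3.25 -> -3
    Seq(13, 14, 15, -13).foreach(x => println(s"$x >> 2 = ${roundingShift(BigInt(x), 2)}"))
  }
}

In the Verilog that follows, this recipe shows up as the io_out_c_point_five, io_out_c_zeros, and io_out_c_ones_digit wires that feed io_out_c_r.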
module PE_450( // @[PE.scala:31:7] input clock, // @[PE.scala:31:7] input reset, // @[PE.scala:31:7] input [7:0] io_in_a, // @[PE.scala:35:14] input [19:0] io_in_b, // @[PE.scala:35:14] input [19:0] io_in_d, // @[PE.scala:35:14] output [7:0] io_out_a, // @[PE.scala:35:14] output [19:0] io_out_b, // @[PE.scala:35:14] output [19:0] io_out_c, // @[PE.scala:35:14] input io_in_control_dataflow, // @[PE.scala:35:14] input io_in_control_propagate, // @[PE.scala:35:14] input [4:0] io_in_control_shift, // @[PE.scala:35:14] output io_out_control_dataflow, // @[PE.scala:35:14] output io_out_control_propagate, // @[PE.scala:35:14] output [4:0] io_out_control_shift, // @[PE.scala:35:14] input [2:0] io_in_id, // @[PE.scala:35:14] output [2:0] io_out_id, // @[PE.scala:35:14] input io_in_last, // @[PE.scala:35:14] output io_out_last, // @[PE.scala:35:14] input io_in_valid, // @[PE.scala:35:14] output io_out_valid, // @[PE.scala:35:14] output io_bad_dataflow // @[PE.scala:35:14] ); wire [19:0] _mac_unit_io_out_d; // @[PE.scala:64:24] wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7] wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7] wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7] wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7] wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7] wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7] wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7] wire io_in_last_0 = io_in_last; // @[PE.scala:31:7] wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7] wire io_bad_dataflow_0 = 1'h0; // @[PE.scala:31:7] wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7] wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37] wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37] wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35] wire [19:0] c1_lo_1 = io_in_d_0; // @[PE.scala:31:7] wire [19:0] c2_lo_1 = io_in_d_0; // @[PE.scala:31:7] wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7] wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7] wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7] wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7] wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7] wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7] wire [19:0] io_out_b_0; // @[PE.scala:31:7] wire [19:0] io_out_c_0; // @[PE.scala:31:7] reg [31:0] c1; // @[PE.scala:70:15] wire [31:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15] wire [31:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38] reg [31:0] c2; // @[PE.scala:71:15] wire [31:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15] wire [31:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38] reg last_s; // @[PE.scala:89:25] wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21] wire [4:0] shift_offset = flip ? 
io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25] wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25] wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32] wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32] wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25] wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53] wire [31:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15] wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}] wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25] wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27] wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27] wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_7 = _io_out_c_zeros_T_1 & _io_out_c_zeros_T_6; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 
32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}] wire [31:0] _GEN_2 = {27'h0, shift_offset}; // @[PE.scala:91:25] wire [31:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15] wire [31:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30] wire [31:0] _io_out_c_T; // @[Arithmetic.scala:107:15] assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33] wire [32:0] _io_out_c_T_2 = {_io_out_c_T[31], _io_out_c_T} + {{31{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}] wire [31:0] _io_out_c_T_3 = _io_out_c_T_2[31:0]; // @[Arithmetic.scala:107:28] wire [31:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28] wire _io_out_c_T_5 = $signed(_io_out_c_T_4) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33] wire _io_out_c_T_6 = $signed(_io_out_c_T_4) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60] wire [31:0] _io_out_c_T_7 = _io_out_c_T_6 ? 32'hFFF80000 : _io_out_c_T_4; // @[Mux.scala:126:16] wire [31:0] _io_out_c_T_8 = _io_out_c_T_5 ? 32'h7FFFF : _io_out_c_T_7; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_9 = _io_out_c_T_8[19:0]; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37] wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37] wire c1_sign = io_in_d_0[19]; // @[PE.scala:31:7] wire c2_sign = io_in_d_0[19]; // @[PE.scala:31:7] wire [1:0] _GEN_4 = {2{c1_sign}}; // @[Arithmetic.scala:117:26, :118:18] wire [1:0] c1_lo_lo_hi; // @[Arithmetic.scala:118:18] assign c1_lo_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18] wire [1:0] c1_lo_hi_hi; // @[Arithmetic.scala:118:18] assign c1_lo_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18] wire [1:0] c1_hi_lo_hi; // @[Arithmetic.scala:118:18] assign c1_hi_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18] wire [1:0] c1_hi_hi_hi; // @[Arithmetic.scala:118:18] assign c1_hi_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18] wire [2:0] c1_lo_lo = {c1_lo_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [2:0] c1_lo_hi = {c1_lo_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [5:0] c1_lo = {c1_lo_hi, c1_lo_lo}; // @[Arithmetic.scala:118:18] wire [2:0] c1_hi_lo = {c1_hi_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [2:0] c1_hi_hi = {c1_hi_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [5:0] c1_hi = {c1_hi_hi, c1_hi_lo}; // @[Arithmetic.scala:118:18] wire [11:0] _c1_T = {c1_hi, c1_lo}; // @[Arithmetic.scala:118:18] wire [31:0] _c1_T_1 = {_c1_T, c1_lo_1}; // @[Arithmetic.scala:118:{14,18}] wire [31:0] _c1_T_2 = _c1_T_1; // @[Arithmetic.scala:118:{14,61}] wire [31:0] _c1_WIRE = _c1_T_2; // @[Arithmetic.scala:118:61] wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53] wire [31:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); 
// @[PE.scala:71:15] wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}] wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_16 = _io_out_c_zeros_T_10 & _io_out_c_zeros_T_15; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}] wire [31:0] _GEN_5 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15] wire [31:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T_1 = _GEN_5; // @[Arithmetic.scala:103:30] wire [31:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15] assign _io_out_c_T_11 = _GEN_5; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33] wire [32:0] _io_out_c_T_13 = {_io_out_c_T_11[31], _io_out_c_T_11} + {{31{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}] wire [31:0] _io_out_c_T_14 = _io_out_c_T_13[31:0]; // @[Arithmetic.scala:107:28] wire [31:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28] wire _io_out_c_T_16 = $signed(_io_out_c_T_15) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33] wire _io_out_c_T_17 = $signed(_io_out_c_T_15) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60] wire [31:0] _io_out_c_T_18 = _io_out_c_T_17 ? 32'hFFF80000 : _io_out_c_T_15; // @[Mux.scala:126:16] wire [31:0] _io_out_c_T_19 = _io_out_c_T_16 ? 
32'h7FFFF : _io_out_c_T_18; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_20 = _io_out_c_T_19[19:0]; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37] wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37] wire [1:0] _GEN_6 = {2{c2_sign}}; // @[Arithmetic.scala:117:26, :118:18] wire [1:0] c2_lo_lo_hi; // @[Arithmetic.scala:118:18] assign c2_lo_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18] wire [1:0] c2_lo_hi_hi; // @[Arithmetic.scala:118:18] assign c2_lo_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18] wire [1:0] c2_hi_lo_hi; // @[Arithmetic.scala:118:18] assign c2_hi_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18] wire [1:0] c2_hi_hi_hi; // @[Arithmetic.scala:118:18] assign c2_hi_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18] wire [2:0] c2_lo_lo = {c2_lo_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [2:0] c2_lo_hi = {c2_lo_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [5:0] c2_lo = {c2_lo_hi, c2_lo_lo}; // @[Arithmetic.scala:118:18] wire [2:0] c2_hi_lo = {c2_hi_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [2:0] c2_hi_hi = {c2_hi_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [5:0] c2_hi = {c2_hi_hi, c2_hi_lo}; // @[Arithmetic.scala:118:18] wire [11:0] _c2_T = {c2_hi, c2_lo}; // @[Arithmetic.scala:118:18] wire [31:0] _c2_T_1 = {_c2_T, c2_lo_1}; // @[Arithmetic.scala:118:{14,18}] wire [31:0] _c2_T_2 = _c2_T_1; // @[Arithmetic.scala:118:{14,61}] wire [31:0] _c2_WIRE = _c2_T_2; // @[Arithmetic.scala:118:61] wire [31:0] _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38] wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5[7:0]; // @[PE.scala:121:38] wire [31:0] _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38] wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7[7:0]; // @[PE.scala:127:38] assign io_out_c_0 = io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? c1[19:0] : c2[19:0]) : io_in_control_propagate_0 ? _io_out_c_T_10 : _io_out_c_T_21; // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :104:16, :111:16, :118:101, :119:30, :120:16, :126:16] assign io_out_b_0 = io_in_control_dataflow_0 ? _mac_unit_io_out_d : io_in_b_0; // @[PE.scala:31:7, :64:24, :102:95, :103:30, :118:101] wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35] wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35] wire [31:0] _GEN_7 = {{12{io_in_d_0[19]}}, io_in_d_0}; // @[PE.scala:31:7, :124:10] wire [31:0] _GEN_8 = {{12{_mac_unit_io_out_d[19]}}, _mac_unit_io_out_d}; // @[PE.scala:64:24, :108:10] always @(posedge clock) begin // @[PE.scala:31:7] if (io_in_valid_0) begin // @[PE.scala:31:7] if (io_in_control_dataflow_0) begin // @[PE.scala:31:7] if (io_in_control_dataflow_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :70:15, :118:101, :119:30, :124:10] c1 <= _GEN_7; // @[PE.scala:70:15, :124:10] if (~io_in_control_dataflow_0 | io_in_control_propagate_0) begin // @[PE.scala:31:7, :71:15, :118:101, :119:30] end else // @[PE.scala:71:15, :118:101, :119:30] c2 <= _GEN_7; // @[PE.scala:71:15, :124:10] end else begin // @[PE.scala:31:7] c1 <= io_in_control_propagate_0 ? _c1_WIRE : _GEN_8; // @[PE.scala:31:7, :70:15, :103:30, :108:10, :109:10, :115:10] c2 <= io_in_control_propagate_0 ? 
_GEN_8 : _c2_WIRE; // @[PE.scala:31:7, :71:15, :103:30, :108:10, :116:10] end last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25] end always @(posedge) MacUnit_194 mac_unit ( // @[PE.scala:64:24] .clock (clock), .reset (reset), .io_in_a (io_in_a_0), // @[PE.scala:31:7] .io_in_b (io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3) : io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE : _mac_unit_io_in_b_WIRE_1), // @[PE.scala:31:7, :102:95, :103:30, :106:{24,37}, :113:{24,37}, :118:101, :119:30, :121:{24,38}, :127:{24,38}] .io_in_c (io_in_control_dataflow_0 ? {{12{io_in_b_0[19]}}, io_in_b_0} : io_in_control_propagate_0 ? c2 : c1), // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :107:24, :114:24, :118:101, :122:24] .io_out_d (_mac_unit_io_out_d) ); // @[PE.scala:64:24] assign io_out_a = io_out_a_0; // @[PE.scala:31:7] assign io_out_b = io_out_b_0; // @[PE.scala:31:7] assign io_out_c = io_out_c_0; // @[PE.scala:31:7] assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7] assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7] assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7] assign io_out_id = io_out_id_0; // @[PE.scala:31:7] assign io_out_last = io_out_last_0; // @[PE.scala:31:7] assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7] assign io_bad_dataflow = io_bad_dataflow_0; // @[PE.scala:31:7] endmodule
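The saturation constants in the module above, 32'sh7FFFF and -32'sh80000, are the signed bounds produced by clippedToWidthOf when the 32-bit accumulator is clipped into the 20-bit outputType. The following is a minimal plain-Scala sketch of that clamp, assuming a plain two's-complement interpretation of the value; clipToWidth is an illustrative name, not a function from the gemmini sources.

// Behavioral model (illustration only) of SIntArithmetic.clippedToWidthOf:
// saturate a signed value into `width` bits instead of letting it wrap.
object ClipModel {
  def clipToWidth(x: BigInt, width: Int): BigInt = {
    val maxSat = (BigInt(1) << (width - 1)) - 1 // 0x7FFFF when width = 20
    val minSat = -(BigInt(1) << (width - 1))    // -0x80000 when width = 20
    if (x > maxSat) maxSat else if (x < minSat) minSat else x
  }

  def main(args: Array[String]): Unit = {
    Seq(BigInt(1000000), BigInt(-1000000), BigInt(1234)).foreach { x =>
      println(s"clipToWidth($x, 20) = ${clipToWidth(x, 20)}") // 524287, -524288, 1234
    }
  }
}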
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File recFNFromFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ object recFNFromFN { def apply(expWidth: Int, sigWidth: Int, in: Bits) = { val rawIn = rawFloatFromFN(expWidth, sigWidth, in) rawIn.sign ## (Mux(rawIn.isZero, 0.U(3.W), rawIn.sExp(expWidth, expWidth - 2)) | Mux(rawIn.isNaN, 1.U, 0.U)) ## rawIn.sExp(expWidth - 3, 0) ## rawIn.sig(sigWidth - 2, 0) } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } } File fNFromRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ object fNFromRecFN { def apply(expWidth: Int, sigWidth: Int, in: Bits) = { val minNormExp = (BigInt(1)<<(expWidth - 1)) + 2 val rawIn = rawFloatFromRecFN(expWidth, sigWidth, in) val isSubnormal = rawIn.sExp < minNormExp.S val denormShiftDist = 1.U - rawIn.sExp(log2Up(sigWidth - 1) - 1, 0) val denormFract = ((rawIn.sig>>1)>>denormShiftDist)(sigWidth - 2, 0) val expOut = Mux(isSubnormal, 0.U, rawIn.sExp(expWidth - 1, 0) - ((BigInt(1)<<(expWidth - 1)) + 1).U ) | Fill(expWidth, rawIn.isNaN || rawIn.isInf) val fractOut = Mux(isSubnormal, denormFract, Mux(rawIn.isInf, 0.U, rawIn.sig(sigWidth - 2, 0)) ) Cat(rawIn.sign, expOut, fractOut) } } File rawFloatFromFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ object rawFloatFromFN { def apply(expWidth: Int, sigWidth: Int, in: Bits) = { val sign = in(expWidth + sigWidth - 1) val expIn = in(expWidth + sigWidth - 2, sigWidth - 1) val fractIn = in(sigWidth - 2, 0) val isZeroExpIn = (expIn === 0.U) val isZeroFractIn = (fractIn === 0.U) val normDist = countLeadingZeros(fractIn) val subnormFract = (fractIn << normDist) (sigWidth - 3, 0) << 1 val adjustedExp = Mux(isZeroExpIn, normDist ^ ((BigInt(1) << (expWidth + 1)) - 1).U, expIn ) + ((BigInt(1) << (expWidth - 1)).U | Mux(isZeroExpIn, 2.U, 1.U)) val isZero = isZeroExpIn && isZeroFractIn val isSpecial = adjustedExp(expWidth, expWidth - 1) === 3.U val out = Wire(new RawFloat(expWidth, sigWidth)) out.isNaN := isSpecial && !isZeroFractIn out.isInf := isSpecial && isZeroFractIn out.isZero := isZero out.sign := sign out.sExp := adjustedExp(expWidth, 0).zext out.sig := 0.U(1.W) ## !isZero ## Mux(isZeroExpIn, subnormFract, fractIn) out } } File rawFloatFromRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ /*---------------------------------------------------------------------------- | In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be | set. 
*----------------------------------------------------------------------------*/ object rawFloatFromRecFN { def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat = { val exp = in(expWidth + sigWidth - 1, sigWidth - 1) val isZero = exp(expWidth, expWidth - 2) === 0.U val isSpecial = exp(expWidth, expWidth - 1) === 3.U val out = Wire(new RawFloat(expWidth, sigWidth)) out.isNaN := isSpecial && exp(expWidth - 2) out.isInf := isSpecial && ! exp(expWidth - 2) out.isZero := isZero out.sign := in(expWidth + sigWidth) out.sExp := exp.zext out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0) out } }
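(Editorial aside, not part of the Chisel sources above or the generated Verilog below.) The recoded format used throughout these files widens the exponent by one bit so that zero, NaN, and infinity can be classified from the exponent's top bits alone, which is exactly what rawFloatFromRecFN does. A minimal pure-Scala sketch of that classification rule follows; the object and method names are hypothetical and chosen only for illustration.

object RecodedClassifySketch {
  // recExp is the (expWidth + 1)-bit recoded exponent, extracted as in
  // rawFloatFromRecFN: in(expWidth + sigWidth - 1, sigWidth - 1).
  def classify(recExp: BigInt, expWidth: Int): String = {
    val top3 = (recExp >> (expWidth - 2)) & 0x7 // exp(expWidth, expWidth - 2)
    val top2 = (recExp >> (expWidth - 1)) & 0x3 // exp(expWidth, expWidth - 1)
    if (top3 == 0) "zero"
    else if (top2 == 3) "NaN or Inf" // split further by exp(expWidth - 2), as in rawFloatFromRecFN
    else "finite, nonzero"
  }
  def main(args: Array[String]): Unit = {
    // expWidth = 8 (single precision), so the recoded exponent is 9 bits wide.
    println(classify(BigInt(0), 8))              // zero
    println(classify(BigInt("110000000", 2), 8)) // NaN or Inf
    println(classify(BigInt("010000001", 2), 8)) // finite, nonzero
  }
}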
module MacUnit_9( // @[PE.scala:14:7] input clock, // @[PE.scala:14:7] input reset, // @[PE.scala:14:7] input [31:0] io_in_a_bits, // @[PE.scala:16:14] input [31:0] io_in_b_bits, // @[PE.scala:16:14] input [31:0] io_in_c_bits, // @[PE.scala:16:14] output [31:0] io_out_d_bits // @[PE.scala:16:14] ); wire io_out_d_self_rec_rawIn_isNaN; // @[rawFloatFromFN.scala:63:19] wire io_out_d_m2_rec_rawIn_isNaN; // @[rawFloatFromFN.scala:63:19] wire io_out_d_m1_rec_rawIn_isNaN; // @[rawFloatFromFN.scala:63:19] wire [32:0] _io_out_d_muladder_io_out; // @[Arithmetic.scala:376:30] wire [32:0] _io_out_d_m2_resizer_io_out; // @[Arithmetic.scala:369:32] wire [32:0] _io_out_d_m1_resizer_io_out; // @[Arithmetic.scala:362:32] wire [31:0] io_in_a_bits_0 = io_in_a_bits; // @[PE.scala:14:7] wire [31:0] io_in_b_bits_0 = io_in_b_bits; // @[PE.scala:14:7] wire [31:0] io_in_c_bits_0 = io_in_c_bits; // @[PE.scala:14:7] wire [31:0] io_out_d_out_bits; // @[Arithmetic.scala:387:23] wire [31:0] io_out_d_bits_0; // @[PE.scala:14:7] wire io_out_d_m1_rec_rawIn_sign = io_in_a_bits_0[31]; // @[rawFloatFromFN.scala:44:18] wire io_out_d_m1_rec_rawIn_sign_0 = io_out_d_m1_rec_rawIn_sign; // @[rawFloatFromFN.scala:44:18, :63:19] wire [7:0] io_out_d_m1_rec_rawIn_expIn = io_in_a_bits_0[30:23]; // @[rawFloatFromFN.scala:45:19] wire [22:0] io_out_d_m1_rec_rawIn_fractIn = io_in_a_bits_0[22:0]; // @[rawFloatFromFN.scala:46:21] wire io_out_d_m1_rec_rawIn_isZeroExpIn = io_out_d_m1_rec_rawIn_expIn == 8'h0; // @[rawFloatFromFN.scala:45:19, :48:30] wire io_out_d_m1_rec_rawIn_isZeroFractIn = io_out_d_m1_rec_rawIn_fractIn == 23'h0; // @[rawFloatFromFN.scala:46:21, :49:34] wire _io_out_d_m1_rec_rawIn_normDist_T = io_out_d_m1_rec_rawIn_fractIn[0]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_1 = io_out_d_m1_rec_rawIn_fractIn[1]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_2 = io_out_d_m1_rec_rawIn_fractIn[2]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_3 = io_out_d_m1_rec_rawIn_fractIn[3]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_4 = io_out_d_m1_rec_rawIn_fractIn[4]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_5 = io_out_d_m1_rec_rawIn_fractIn[5]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_6 = io_out_d_m1_rec_rawIn_fractIn[6]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_7 = io_out_d_m1_rec_rawIn_fractIn[7]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_8 = io_out_d_m1_rec_rawIn_fractIn[8]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_9 = io_out_d_m1_rec_rawIn_fractIn[9]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_10 = io_out_d_m1_rec_rawIn_fractIn[10]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_11 = io_out_d_m1_rec_rawIn_fractIn[11]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_12 = io_out_d_m1_rec_rawIn_fractIn[12]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_13 = io_out_d_m1_rec_rawIn_fractIn[13]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_14 = io_out_d_m1_rec_rawIn_fractIn[14]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_15 = io_out_d_m1_rec_rawIn_fractIn[15]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_16 = io_out_d_m1_rec_rawIn_fractIn[16]; // @[rawFloatFromFN.scala:46:21] wire 
_io_out_d_m1_rec_rawIn_normDist_T_17 = io_out_d_m1_rec_rawIn_fractIn[17]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_18 = io_out_d_m1_rec_rawIn_fractIn[18]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_19 = io_out_d_m1_rec_rawIn_fractIn[19]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_20 = io_out_d_m1_rec_rawIn_fractIn[20]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_21 = io_out_d_m1_rec_rawIn_fractIn[21]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_22 = io_out_d_m1_rec_rawIn_fractIn[22]; // @[rawFloatFromFN.scala:46:21] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_23 = _io_out_d_m1_rec_rawIn_normDist_T_1 ? 5'h15 : 5'h16; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_24 = _io_out_d_m1_rec_rawIn_normDist_T_2 ? 5'h14 : _io_out_d_m1_rec_rawIn_normDist_T_23; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_25 = _io_out_d_m1_rec_rawIn_normDist_T_3 ? 5'h13 : _io_out_d_m1_rec_rawIn_normDist_T_24; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_26 = _io_out_d_m1_rec_rawIn_normDist_T_4 ? 5'h12 : _io_out_d_m1_rec_rawIn_normDist_T_25; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_27 = _io_out_d_m1_rec_rawIn_normDist_T_5 ? 5'h11 : _io_out_d_m1_rec_rawIn_normDist_T_26; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_28 = _io_out_d_m1_rec_rawIn_normDist_T_6 ? 5'h10 : _io_out_d_m1_rec_rawIn_normDist_T_27; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_29 = _io_out_d_m1_rec_rawIn_normDist_T_7 ? 5'hF : _io_out_d_m1_rec_rawIn_normDist_T_28; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_30 = _io_out_d_m1_rec_rawIn_normDist_T_8 ? 5'hE : _io_out_d_m1_rec_rawIn_normDist_T_29; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_31 = _io_out_d_m1_rec_rawIn_normDist_T_9 ? 5'hD : _io_out_d_m1_rec_rawIn_normDist_T_30; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_32 = _io_out_d_m1_rec_rawIn_normDist_T_10 ? 5'hC : _io_out_d_m1_rec_rawIn_normDist_T_31; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_33 = _io_out_d_m1_rec_rawIn_normDist_T_11 ? 5'hB : _io_out_d_m1_rec_rawIn_normDist_T_32; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_34 = _io_out_d_m1_rec_rawIn_normDist_T_12 ? 5'hA : _io_out_d_m1_rec_rawIn_normDist_T_33; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_35 = _io_out_d_m1_rec_rawIn_normDist_T_13 ? 5'h9 : _io_out_d_m1_rec_rawIn_normDist_T_34; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_36 = _io_out_d_m1_rec_rawIn_normDist_T_14 ? 5'h8 : _io_out_d_m1_rec_rawIn_normDist_T_35; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_37 = _io_out_d_m1_rec_rawIn_normDist_T_15 ? 5'h7 : _io_out_d_m1_rec_rawIn_normDist_T_36; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_38 = _io_out_d_m1_rec_rawIn_normDist_T_16 ? 5'h6 : _io_out_d_m1_rec_rawIn_normDist_T_37; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_39 = _io_out_d_m1_rec_rawIn_normDist_T_17 ? 5'h5 : _io_out_d_m1_rec_rawIn_normDist_T_38; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_40 = _io_out_d_m1_rec_rawIn_normDist_T_18 ? 5'h4 : _io_out_d_m1_rec_rawIn_normDist_T_39; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_41 = _io_out_d_m1_rec_rawIn_normDist_T_19 ? 
5'h3 : _io_out_d_m1_rec_rawIn_normDist_T_40; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_42 = _io_out_d_m1_rec_rawIn_normDist_T_20 ? 5'h2 : _io_out_d_m1_rec_rawIn_normDist_T_41; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_43 = _io_out_d_m1_rec_rawIn_normDist_T_21 ? 5'h1 : _io_out_d_m1_rec_rawIn_normDist_T_42; // @[Mux.scala:50:70] wire [4:0] io_out_d_m1_rec_rawIn_normDist = _io_out_d_m1_rec_rawIn_normDist_T_22 ? 5'h0 : _io_out_d_m1_rec_rawIn_normDist_T_43; // @[Mux.scala:50:70] wire [53:0] _io_out_d_m1_rec_rawIn_subnormFract_T = {31'h0, io_out_d_m1_rec_rawIn_fractIn} << io_out_d_m1_rec_rawIn_normDist; // @[Mux.scala:50:70] wire [21:0] _io_out_d_m1_rec_rawIn_subnormFract_T_1 = _io_out_d_m1_rec_rawIn_subnormFract_T[21:0]; // @[rawFloatFromFN.scala:52:{33,46}] wire [22:0] io_out_d_m1_rec_rawIn_subnormFract = {_io_out_d_m1_rec_rawIn_subnormFract_T_1, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}] wire [8:0] _io_out_d_m1_rec_rawIn_adjustedExp_T = {4'hF, ~io_out_d_m1_rec_rawIn_normDist}; // @[Mux.scala:50:70] wire [8:0] _io_out_d_m1_rec_rawIn_adjustedExp_T_1 = io_out_d_m1_rec_rawIn_isZeroExpIn ? _io_out_d_m1_rec_rawIn_adjustedExp_T : {1'h0, io_out_d_m1_rec_rawIn_expIn}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18] wire [1:0] _io_out_d_m1_rec_rawIn_adjustedExp_T_2 = io_out_d_m1_rec_rawIn_isZeroExpIn ? 2'h2 : 2'h1; // @[rawFloatFromFN.scala:48:30, :58:14] wire [7:0] _io_out_d_m1_rec_rawIn_adjustedExp_T_3 = {6'h20, _io_out_d_m1_rec_rawIn_adjustedExp_T_2}; // @[rawFloatFromFN.scala:58:{9,14}] wire [9:0] _io_out_d_m1_rec_rawIn_adjustedExp_T_4 = {1'h0, _io_out_d_m1_rec_rawIn_adjustedExp_T_1} + {2'h0, _io_out_d_m1_rec_rawIn_adjustedExp_T_3}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9] wire [8:0] io_out_d_m1_rec_rawIn_adjustedExp = _io_out_d_m1_rec_rawIn_adjustedExp_T_4[8:0]; // @[rawFloatFromFN.scala:57:9] wire [8:0] _io_out_d_m1_rec_rawIn_out_sExp_T = io_out_d_m1_rec_rawIn_adjustedExp; // @[rawFloatFromFN.scala:57:9, :68:28] wire io_out_d_m1_rec_rawIn_isZero = io_out_d_m1_rec_rawIn_isZeroExpIn & io_out_d_m1_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30] wire io_out_d_m1_rec_rawIn_isZero_0 = io_out_d_m1_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :63:19] wire [1:0] _io_out_d_m1_rec_rawIn_isSpecial_T = io_out_d_m1_rec_rawIn_adjustedExp[8:7]; // @[rawFloatFromFN.scala:57:9, :61:32] wire io_out_d_m1_rec_rawIn_isSpecial = &_io_out_d_m1_rec_rawIn_isSpecial_T; // @[rawFloatFromFN.scala:61:{32,57}] wire _io_out_d_m1_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:64:28] wire _io_out_d_m1_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:65:28] wire _io_out_d_m1_rec_T_2 = io_out_d_m1_rec_rawIn_isNaN; // @[recFNFromFN.scala:49:20] wire [9:0] _io_out_d_m1_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:68:42] wire [24:0] _io_out_d_m1_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:70:27] wire io_out_d_m1_rec_rawIn_isInf; // @[rawFloatFromFN.scala:63:19] wire [9:0] io_out_d_m1_rec_rawIn_sExp; // @[rawFloatFromFN.scala:63:19] wire [24:0] io_out_d_m1_rec_rawIn_sig; // @[rawFloatFromFN.scala:63:19] wire _io_out_d_m1_rec_rawIn_out_isNaN_T = ~io_out_d_m1_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :64:31] assign _io_out_d_m1_rec_rawIn_out_isNaN_T_1 = io_out_d_m1_rec_rawIn_isSpecial & _io_out_d_m1_rec_rawIn_out_isNaN_T; // @[rawFloatFromFN.scala:61:57, :64:{28,31}] assign io_out_d_m1_rec_rawIn_isNaN = _io_out_d_m1_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:63:19, :64:28] assign 
_io_out_d_m1_rec_rawIn_out_isInf_T = io_out_d_m1_rec_rawIn_isSpecial & io_out_d_m1_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28] assign io_out_d_m1_rec_rawIn_isInf = _io_out_d_m1_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:63:19, :65:28] assign _io_out_d_m1_rec_rawIn_out_sExp_T_1 = {1'h0, _io_out_d_m1_rec_rawIn_out_sExp_T}; // @[rawFloatFromFN.scala:68:{28,42}] assign io_out_d_m1_rec_rawIn_sExp = _io_out_d_m1_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:63:19, :68:42] wire _io_out_d_m1_rec_rawIn_out_sig_T = ~io_out_d_m1_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :70:19] wire [1:0] _io_out_d_m1_rec_rawIn_out_sig_T_1 = {1'h0, _io_out_d_m1_rec_rawIn_out_sig_T}; // @[rawFloatFromFN.scala:70:{16,19}] wire [22:0] _io_out_d_m1_rec_rawIn_out_sig_T_2 = io_out_d_m1_rec_rawIn_isZeroExpIn ? io_out_d_m1_rec_rawIn_subnormFract : io_out_d_m1_rec_rawIn_fractIn; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33] assign _io_out_d_m1_rec_rawIn_out_sig_T_3 = {_io_out_d_m1_rec_rawIn_out_sig_T_1, _io_out_d_m1_rec_rawIn_out_sig_T_2}; // @[rawFloatFromFN.scala:70:{16,27,33}] assign io_out_d_m1_rec_rawIn_sig = _io_out_d_m1_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:63:19, :70:27] wire [2:0] _io_out_d_m1_rec_T = io_out_d_m1_rec_rawIn_sExp[8:6]; // @[recFNFromFN.scala:48:50] wire [2:0] _io_out_d_m1_rec_T_1 = io_out_d_m1_rec_rawIn_isZero_0 ? 3'h0 : _io_out_d_m1_rec_T; // @[recFNFromFN.scala:48:{15,50}] wire [2:0] _io_out_d_m1_rec_T_3 = {_io_out_d_m1_rec_T_1[2:1], _io_out_d_m1_rec_T_1[0] | _io_out_d_m1_rec_T_2}; // @[recFNFromFN.scala:48:{15,76}, :49:20] wire [3:0] _io_out_d_m1_rec_T_4 = {io_out_d_m1_rec_rawIn_sign_0, _io_out_d_m1_rec_T_3}; // @[recFNFromFN.scala:47:20, :48:76] wire [5:0] _io_out_d_m1_rec_T_5 = io_out_d_m1_rec_rawIn_sExp[5:0]; // @[recFNFromFN.scala:50:23] wire [9:0] _io_out_d_m1_rec_T_6 = {_io_out_d_m1_rec_T_4, _io_out_d_m1_rec_T_5}; // @[recFNFromFN.scala:47:20, :49:45, :50:23] wire [22:0] _io_out_d_m1_rec_T_7 = io_out_d_m1_rec_rawIn_sig[22:0]; // @[recFNFromFN.scala:51:22] wire [32:0] io_out_d_m1_rec = {_io_out_d_m1_rec_T_6, _io_out_d_m1_rec_T_7}; // @[recFNFromFN.scala:49:45, :50:41, :51:22] wire io_out_d_m2_rec_rawIn_sign = io_in_b_bits_0[31]; // @[rawFloatFromFN.scala:44:18] wire io_out_d_m2_rec_rawIn_sign_0 = io_out_d_m2_rec_rawIn_sign; // @[rawFloatFromFN.scala:44:18, :63:19] wire [7:0] io_out_d_m2_rec_rawIn_expIn = io_in_b_bits_0[30:23]; // @[rawFloatFromFN.scala:45:19] wire [22:0] io_out_d_m2_rec_rawIn_fractIn = io_in_b_bits_0[22:0]; // @[rawFloatFromFN.scala:46:21] wire io_out_d_m2_rec_rawIn_isZeroExpIn = io_out_d_m2_rec_rawIn_expIn == 8'h0; // @[rawFloatFromFN.scala:45:19, :48:30] wire io_out_d_m2_rec_rawIn_isZeroFractIn = io_out_d_m2_rec_rawIn_fractIn == 23'h0; // @[rawFloatFromFN.scala:46:21, :49:34] wire _io_out_d_m2_rec_rawIn_normDist_T = io_out_d_m2_rec_rawIn_fractIn[0]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_1 = io_out_d_m2_rec_rawIn_fractIn[1]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_2 = io_out_d_m2_rec_rawIn_fractIn[2]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_3 = io_out_d_m2_rec_rawIn_fractIn[3]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_4 = io_out_d_m2_rec_rawIn_fractIn[4]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_5 = io_out_d_m2_rec_rawIn_fractIn[5]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_6 = io_out_d_m2_rec_rawIn_fractIn[6]; // 
@[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_7 = io_out_d_m2_rec_rawIn_fractIn[7]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_8 = io_out_d_m2_rec_rawIn_fractIn[8]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_9 = io_out_d_m2_rec_rawIn_fractIn[9]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_10 = io_out_d_m2_rec_rawIn_fractIn[10]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_11 = io_out_d_m2_rec_rawIn_fractIn[11]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_12 = io_out_d_m2_rec_rawIn_fractIn[12]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_13 = io_out_d_m2_rec_rawIn_fractIn[13]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_14 = io_out_d_m2_rec_rawIn_fractIn[14]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_15 = io_out_d_m2_rec_rawIn_fractIn[15]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_16 = io_out_d_m2_rec_rawIn_fractIn[16]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_17 = io_out_d_m2_rec_rawIn_fractIn[17]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_18 = io_out_d_m2_rec_rawIn_fractIn[18]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_19 = io_out_d_m2_rec_rawIn_fractIn[19]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_20 = io_out_d_m2_rec_rawIn_fractIn[20]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_21 = io_out_d_m2_rec_rawIn_fractIn[21]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_22 = io_out_d_m2_rec_rawIn_fractIn[22]; // @[rawFloatFromFN.scala:46:21] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_23 = _io_out_d_m2_rec_rawIn_normDist_T_1 ? 5'h15 : 5'h16; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_24 = _io_out_d_m2_rec_rawIn_normDist_T_2 ? 5'h14 : _io_out_d_m2_rec_rawIn_normDist_T_23; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_25 = _io_out_d_m2_rec_rawIn_normDist_T_3 ? 5'h13 : _io_out_d_m2_rec_rawIn_normDist_T_24; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_26 = _io_out_d_m2_rec_rawIn_normDist_T_4 ? 5'h12 : _io_out_d_m2_rec_rawIn_normDist_T_25; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_27 = _io_out_d_m2_rec_rawIn_normDist_T_5 ? 5'h11 : _io_out_d_m2_rec_rawIn_normDist_T_26; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_28 = _io_out_d_m2_rec_rawIn_normDist_T_6 ? 5'h10 : _io_out_d_m2_rec_rawIn_normDist_T_27; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_29 = _io_out_d_m2_rec_rawIn_normDist_T_7 ? 5'hF : _io_out_d_m2_rec_rawIn_normDist_T_28; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_30 = _io_out_d_m2_rec_rawIn_normDist_T_8 ? 5'hE : _io_out_d_m2_rec_rawIn_normDist_T_29; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_31 = _io_out_d_m2_rec_rawIn_normDist_T_9 ? 5'hD : _io_out_d_m2_rec_rawIn_normDist_T_30; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_32 = _io_out_d_m2_rec_rawIn_normDist_T_10 ? 5'hC : _io_out_d_m2_rec_rawIn_normDist_T_31; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_33 = _io_out_d_m2_rec_rawIn_normDist_T_11 ? 
5'hB : _io_out_d_m2_rec_rawIn_normDist_T_32; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_34 = _io_out_d_m2_rec_rawIn_normDist_T_12 ? 5'hA : _io_out_d_m2_rec_rawIn_normDist_T_33; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_35 = _io_out_d_m2_rec_rawIn_normDist_T_13 ? 5'h9 : _io_out_d_m2_rec_rawIn_normDist_T_34; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_36 = _io_out_d_m2_rec_rawIn_normDist_T_14 ? 5'h8 : _io_out_d_m2_rec_rawIn_normDist_T_35; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_37 = _io_out_d_m2_rec_rawIn_normDist_T_15 ? 5'h7 : _io_out_d_m2_rec_rawIn_normDist_T_36; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_38 = _io_out_d_m2_rec_rawIn_normDist_T_16 ? 5'h6 : _io_out_d_m2_rec_rawIn_normDist_T_37; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_39 = _io_out_d_m2_rec_rawIn_normDist_T_17 ? 5'h5 : _io_out_d_m2_rec_rawIn_normDist_T_38; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_40 = _io_out_d_m2_rec_rawIn_normDist_T_18 ? 5'h4 : _io_out_d_m2_rec_rawIn_normDist_T_39; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_41 = _io_out_d_m2_rec_rawIn_normDist_T_19 ? 5'h3 : _io_out_d_m2_rec_rawIn_normDist_T_40; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_42 = _io_out_d_m2_rec_rawIn_normDist_T_20 ? 5'h2 : _io_out_d_m2_rec_rawIn_normDist_T_41; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_43 = _io_out_d_m2_rec_rawIn_normDist_T_21 ? 5'h1 : _io_out_d_m2_rec_rawIn_normDist_T_42; // @[Mux.scala:50:70] wire [4:0] io_out_d_m2_rec_rawIn_normDist = _io_out_d_m2_rec_rawIn_normDist_T_22 ? 5'h0 : _io_out_d_m2_rec_rawIn_normDist_T_43; // @[Mux.scala:50:70] wire [53:0] _io_out_d_m2_rec_rawIn_subnormFract_T = {31'h0, io_out_d_m2_rec_rawIn_fractIn} << io_out_d_m2_rec_rawIn_normDist; // @[Mux.scala:50:70] wire [21:0] _io_out_d_m2_rec_rawIn_subnormFract_T_1 = _io_out_d_m2_rec_rawIn_subnormFract_T[21:0]; // @[rawFloatFromFN.scala:52:{33,46}] wire [22:0] io_out_d_m2_rec_rawIn_subnormFract = {_io_out_d_m2_rec_rawIn_subnormFract_T_1, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}] wire [8:0] _io_out_d_m2_rec_rawIn_adjustedExp_T = {4'hF, ~io_out_d_m2_rec_rawIn_normDist}; // @[Mux.scala:50:70] wire [8:0] _io_out_d_m2_rec_rawIn_adjustedExp_T_1 = io_out_d_m2_rec_rawIn_isZeroExpIn ? _io_out_d_m2_rec_rawIn_adjustedExp_T : {1'h0, io_out_d_m2_rec_rawIn_expIn}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18] wire [1:0] _io_out_d_m2_rec_rawIn_adjustedExp_T_2 = io_out_d_m2_rec_rawIn_isZeroExpIn ? 
2'h2 : 2'h1; // @[rawFloatFromFN.scala:48:30, :58:14] wire [7:0] _io_out_d_m2_rec_rawIn_adjustedExp_T_3 = {6'h20, _io_out_d_m2_rec_rawIn_adjustedExp_T_2}; // @[rawFloatFromFN.scala:58:{9,14}] wire [9:0] _io_out_d_m2_rec_rawIn_adjustedExp_T_4 = {1'h0, _io_out_d_m2_rec_rawIn_adjustedExp_T_1} + {2'h0, _io_out_d_m2_rec_rawIn_adjustedExp_T_3}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9] wire [8:0] io_out_d_m2_rec_rawIn_adjustedExp = _io_out_d_m2_rec_rawIn_adjustedExp_T_4[8:0]; // @[rawFloatFromFN.scala:57:9] wire [8:0] _io_out_d_m2_rec_rawIn_out_sExp_T = io_out_d_m2_rec_rawIn_adjustedExp; // @[rawFloatFromFN.scala:57:9, :68:28] wire io_out_d_m2_rec_rawIn_isZero = io_out_d_m2_rec_rawIn_isZeroExpIn & io_out_d_m2_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30] wire io_out_d_m2_rec_rawIn_isZero_0 = io_out_d_m2_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :63:19] wire [1:0] _io_out_d_m2_rec_rawIn_isSpecial_T = io_out_d_m2_rec_rawIn_adjustedExp[8:7]; // @[rawFloatFromFN.scala:57:9, :61:32] wire io_out_d_m2_rec_rawIn_isSpecial = &_io_out_d_m2_rec_rawIn_isSpecial_T; // @[rawFloatFromFN.scala:61:{32,57}] wire _io_out_d_m2_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:64:28] wire _io_out_d_m2_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:65:28] wire _io_out_d_m2_rec_T_2 = io_out_d_m2_rec_rawIn_isNaN; // @[recFNFromFN.scala:49:20] wire [9:0] _io_out_d_m2_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:68:42] wire [24:0] _io_out_d_m2_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:70:27] wire io_out_d_m2_rec_rawIn_isInf; // @[rawFloatFromFN.scala:63:19] wire [9:0] io_out_d_m2_rec_rawIn_sExp; // @[rawFloatFromFN.scala:63:19] wire [24:0] io_out_d_m2_rec_rawIn_sig; // @[rawFloatFromFN.scala:63:19] wire _io_out_d_m2_rec_rawIn_out_isNaN_T = ~io_out_d_m2_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :64:31] assign _io_out_d_m2_rec_rawIn_out_isNaN_T_1 = io_out_d_m2_rec_rawIn_isSpecial & _io_out_d_m2_rec_rawIn_out_isNaN_T; // @[rawFloatFromFN.scala:61:57, :64:{28,31}] assign io_out_d_m2_rec_rawIn_isNaN = _io_out_d_m2_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:63:19, :64:28] assign _io_out_d_m2_rec_rawIn_out_isInf_T = io_out_d_m2_rec_rawIn_isSpecial & io_out_d_m2_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28] assign io_out_d_m2_rec_rawIn_isInf = _io_out_d_m2_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:63:19, :65:28] assign _io_out_d_m2_rec_rawIn_out_sExp_T_1 = {1'h0, _io_out_d_m2_rec_rawIn_out_sExp_T}; // @[rawFloatFromFN.scala:68:{28,42}] assign io_out_d_m2_rec_rawIn_sExp = _io_out_d_m2_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:63:19, :68:42] wire _io_out_d_m2_rec_rawIn_out_sig_T = ~io_out_d_m2_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :70:19] wire [1:0] _io_out_d_m2_rec_rawIn_out_sig_T_1 = {1'h0, _io_out_d_m2_rec_rawIn_out_sig_T}; // @[rawFloatFromFN.scala:70:{16,19}] wire [22:0] _io_out_d_m2_rec_rawIn_out_sig_T_2 = io_out_d_m2_rec_rawIn_isZeroExpIn ? io_out_d_m2_rec_rawIn_subnormFract : io_out_d_m2_rec_rawIn_fractIn; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33] assign _io_out_d_m2_rec_rawIn_out_sig_T_3 = {_io_out_d_m2_rec_rawIn_out_sig_T_1, _io_out_d_m2_rec_rawIn_out_sig_T_2}; // @[rawFloatFromFN.scala:70:{16,27,33}] assign io_out_d_m2_rec_rawIn_sig = _io_out_d_m2_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:63:19, :70:27] wire [2:0] _io_out_d_m2_rec_T = io_out_d_m2_rec_rawIn_sExp[8:6]; // @[recFNFromFN.scala:48:50] wire [2:0] _io_out_d_m2_rec_T_1 = io_out_d_m2_rec_rawIn_isZero_0 ? 
3'h0 : _io_out_d_m2_rec_T; // @[recFNFromFN.scala:48:{15,50}] wire [2:0] _io_out_d_m2_rec_T_3 = {_io_out_d_m2_rec_T_1[2:1], _io_out_d_m2_rec_T_1[0] | _io_out_d_m2_rec_T_2}; // @[recFNFromFN.scala:48:{15,76}, :49:20] wire [3:0] _io_out_d_m2_rec_T_4 = {io_out_d_m2_rec_rawIn_sign_0, _io_out_d_m2_rec_T_3}; // @[recFNFromFN.scala:47:20, :48:76] wire [5:0] _io_out_d_m2_rec_T_5 = io_out_d_m2_rec_rawIn_sExp[5:0]; // @[recFNFromFN.scala:50:23] wire [9:0] _io_out_d_m2_rec_T_6 = {_io_out_d_m2_rec_T_4, _io_out_d_m2_rec_T_5}; // @[recFNFromFN.scala:47:20, :49:45, :50:23] wire [22:0] _io_out_d_m2_rec_T_7 = io_out_d_m2_rec_rawIn_sig[22:0]; // @[recFNFromFN.scala:51:22] wire [32:0] io_out_d_m2_rec = {_io_out_d_m2_rec_T_6, _io_out_d_m2_rec_T_7}; // @[recFNFromFN.scala:49:45, :50:41, :51:22] wire io_out_d_self_rec_rawIn_sign = io_in_c_bits_0[31]; // @[rawFloatFromFN.scala:44:18] wire io_out_d_self_rec_rawIn_sign_0 = io_out_d_self_rec_rawIn_sign; // @[rawFloatFromFN.scala:44:18, :63:19] wire [7:0] io_out_d_self_rec_rawIn_expIn = io_in_c_bits_0[30:23]; // @[rawFloatFromFN.scala:45:19] wire [22:0] io_out_d_self_rec_rawIn_fractIn = io_in_c_bits_0[22:0]; // @[rawFloatFromFN.scala:46:21] wire io_out_d_self_rec_rawIn_isZeroExpIn = io_out_d_self_rec_rawIn_expIn == 8'h0; // @[rawFloatFromFN.scala:45:19, :48:30] wire io_out_d_self_rec_rawIn_isZeroFractIn = io_out_d_self_rec_rawIn_fractIn == 23'h0; // @[rawFloatFromFN.scala:46:21, :49:34] wire _io_out_d_self_rec_rawIn_normDist_T = io_out_d_self_rec_rawIn_fractIn[0]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_1 = io_out_d_self_rec_rawIn_fractIn[1]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_2 = io_out_d_self_rec_rawIn_fractIn[2]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_3 = io_out_d_self_rec_rawIn_fractIn[3]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_4 = io_out_d_self_rec_rawIn_fractIn[4]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_5 = io_out_d_self_rec_rawIn_fractIn[5]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_6 = io_out_d_self_rec_rawIn_fractIn[6]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_7 = io_out_d_self_rec_rawIn_fractIn[7]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_8 = io_out_d_self_rec_rawIn_fractIn[8]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_9 = io_out_d_self_rec_rawIn_fractIn[9]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_10 = io_out_d_self_rec_rawIn_fractIn[10]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_11 = io_out_d_self_rec_rawIn_fractIn[11]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_12 = io_out_d_self_rec_rawIn_fractIn[12]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_13 = io_out_d_self_rec_rawIn_fractIn[13]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_14 = io_out_d_self_rec_rawIn_fractIn[14]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_15 = io_out_d_self_rec_rawIn_fractIn[15]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_16 = io_out_d_self_rec_rawIn_fractIn[16]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_17 = io_out_d_self_rec_rawIn_fractIn[17]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_18 = 
io_out_d_self_rec_rawIn_fractIn[18]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_19 = io_out_d_self_rec_rawIn_fractIn[19]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_20 = io_out_d_self_rec_rawIn_fractIn[20]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_21 = io_out_d_self_rec_rawIn_fractIn[21]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_22 = io_out_d_self_rec_rawIn_fractIn[22]; // @[rawFloatFromFN.scala:46:21] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_23 = _io_out_d_self_rec_rawIn_normDist_T_1 ? 5'h15 : 5'h16; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_24 = _io_out_d_self_rec_rawIn_normDist_T_2 ? 5'h14 : _io_out_d_self_rec_rawIn_normDist_T_23; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_25 = _io_out_d_self_rec_rawIn_normDist_T_3 ? 5'h13 : _io_out_d_self_rec_rawIn_normDist_T_24; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_26 = _io_out_d_self_rec_rawIn_normDist_T_4 ? 5'h12 : _io_out_d_self_rec_rawIn_normDist_T_25; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_27 = _io_out_d_self_rec_rawIn_normDist_T_5 ? 5'h11 : _io_out_d_self_rec_rawIn_normDist_T_26; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_28 = _io_out_d_self_rec_rawIn_normDist_T_6 ? 5'h10 : _io_out_d_self_rec_rawIn_normDist_T_27; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_29 = _io_out_d_self_rec_rawIn_normDist_T_7 ? 5'hF : _io_out_d_self_rec_rawIn_normDist_T_28; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_30 = _io_out_d_self_rec_rawIn_normDist_T_8 ? 5'hE : _io_out_d_self_rec_rawIn_normDist_T_29; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_31 = _io_out_d_self_rec_rawIn_normDist_T_9 ? 5'hD : _io_out_d_self_rec_rawIn_normDist_T_30; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_32 = _io_out_d_self_rec_rawIn_normDist_T_10 ? 5'hC : _io_out_d_self_rec_rawIn_normDist_T_31; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_33 = _io_out_d_self_rec_rawIn_normDist_T_11 ? 5'hB : _io_out_d_self_rec_rawIn_normDist_T_32; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_34 = _io_out_d_self_rec_rawIn_normDist_T_12 ? 5'hA : _io_out_d_self_rec_rawIn_normDist_T_33; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_35 = _io_out_d_self_rec_rawIn_normDist_T_13 ? 5'h9 : _io_out_d_self_rec_rawIn_normDist_T_34; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_36 = _io_out_d_self_rec_rawIn_normDist_T_14 ? 5'h8 : _io_out_d_self_rec_rawIn_normDist_T_35; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_37 = _io_out_d_self_rec_rawIn_normDist_T_15 ? 5'h7 : _io_out_d_self_rec_rawIn_normDist_T_36; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_38 = _io_out_d_self_rec_rawIn_normDist_T_16 ? 5'h6 : _io_out_d_self_rec_rawIn_normDist_T_37; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_39 = _io_out_d_self_rec_rawIn_normDist_T_17 ? 5'h5 : _io_out_d_self_rec_rawIn_normDist_T_38; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_40 = _io_out_d_self_rec_rawIn_normDist_T_18 ? 5'h4 : _io_out_d_self_rec_rawIn_normDist_T_39; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_41 = _io_out_d_self_rec_rawIn_normDist_T_19 ? 
5'h3 : _io_out_d_self_rec_rawIn_normDist_T_40; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_42 = _io_out_d_self_rec_rawIn_normDist_T_20 ? 5'h2 : _io_out_d_self_rec_rawIn_normDist_T_41; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_43 = _io_out_d_self_rec_rawIn_normDist_T_21 ? 5'h1 : _io_out_d_self_rec_rawIn_normDist_T_42; // @[Mux.scala:50:70] wire [4:0] io_out_d_self_rec_rawIn_normDist = _io_out_d_self_rec_rawIn_normDist_T_22 ? 5'h0 : _io_out_d_self_rec_rawIn_normDist_T_43; // @[Mux.scala:50:70] wire [53:0] _io_out_d_self_rec_rawIn_subnormFract_T = {31'h0, io_out_d_self_rec_rawIn_fractIn} << io_out_d_self_rec_rawIn_normDist; // @[Mux.scala:50:70] wire [21:0] _io_out_d_self_rec_rawIn_subnormFract_T_1 = _io_out_d_self_rec_rawIn_subnormFract_T[21:0]; // @[rawFloatFromFN.scala:52:{33,46}] wire [22:0] io_out_d_self_rec_rawIn_subnormFract = {_io_out_d_self_rec_rawIn_subnormFract_T_1, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}] wire [8:0] _io_out_d_self_rec_rawIn_adjustedExp_T = {4'hF, ~io_out_d_self_rec_rawIn_normDist}; // @[Mux.scala:50:70] wire [8:0] _io_out_d_self_rec_rawIn_adjustedExp_T_1 = io_out_d_self_rec_rawIn_isZeroExpIn ? _io_out_d_self_rec_rawIn_adjustedExp_T : {1'h0, io_out_d_self_rec_rawIn_expIn}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18] wire [1:0] _io_out_d_self_rec_rawIn_adjustedExp_T_2 = io_out_d_self_rec_rawIn_isZeroExpIn ? 2'h2 : 2'h1; // @[rawFloatFromFN.scala:48:30, :58:14] wire [7:0] _io_out_d_self_rec_rawIn_adjustedExp_T_3 = {6'h20, _io_out_d_self_rec_rawIn_adjustedExp_T_2}; // @[rawFloatFromFN.scala:58:{9,14}] wire [9:0] _io_out_d_self_rec_rawIn_adjustedExp_T_4 = {1'h0, _io_out_d_self_rec_rawIn_adjustedExp_T_1} + {2'h0, _io_out_d_self_rec_rawIn_adjustedExp_T_3}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9] wire [8:0] io_out_d_self_rec_rawIn_adjustedExp = _io_out_d_self_rec_rawIn_adjustedExp_T_4[8:0]; // @[rawFloatFromFN.scala:57:9] wire [8:0] _io_out_d_self_rec_rawIn_out_sExp_T = io_out_d_self_rec_rawIn_adjustedExp; // @[rawFloatFromFN.scala:57:9, :68:28] wire io_out_d_self_rec_rawIn_isZero = io_out_d_self_rec_rawIn_isZeroExpIn & io_out_d_self_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30] wire io_out_d_self_rec_rawIn_isZero_0 = io_out_d_self_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :63:19] wire [1:0] _io_out_d_self_rec_rawIn_isSpecial_T = io_out_d_self_rec_rawIn_adjustedExp[8:7]; // @[rawFloatFromFN.scala:57:9, :61:32] wire io_out_d_self_rec_rawIn_isSpecial = &_io_out_d_self_rec_rawIn_isSpecial_T; // @[rawFloatFromFN.scala:61:{32,57}] wire _io_out_d_self_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:64:28] wire _io_out_d_self_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:65:28] wire _io_out_d_self_rec_T_2 = io_out_d_self_rec_rawIn_isNaN; // @[recFNFromFN.scala:49:20] wire [9:0] _io_out_d_self_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:68:42] wire [24:0] _io_out_d_self_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:70:27] wire io_out_d_self_rec_rawIn_isInf; // @[rawFloatFromFN.scala:63:19] wire [9:0] io_out_d_self_rec_rawIn_sExp; // @[rawFloatFromFN.scala:63:19] wire [24:0] io_out_d_self_rec_rawIn_sig; // @[rawFloatFromFN.scala:63:19] wire _io_out_d_self_rec_rawIn_out_isNaN_T = ~io_out_d_self_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :64:31] assign _io_out_d_self_rec_rawIn_out_isNaN_T_1 = io_out_d_self_rec_rawIn_isSpecial & _io_out_d_self_rec_rawIn_out_isNaN_T; // @[rawFloatFromFN.scala:61:57, :64:{28,31}] assign 
io_out_d_self_rec_rawIn_isNaN = _io_out_d_self_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:63:19, :64:28] assign _io_out_d_self_rec_rawIn_out_isInf_T = io_out_d_self_rec_rawIn_isSpecial & io_out_d_self_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28] assign io_out_d_self_rec_rawIn_isInf = _io_out_d_self_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:63:19, :65:28] assign _io_out_d_self_rec_rawIn_out_sExp_T_1 = {1'h0, _io_out_d_self_rec_rawIn_out_sExp_T}; // @[rawFloatFromFN.scala:68:{28,42}] assign io_out_d_self_rec_rawIn_sExp = _io_out_d_self_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:63:19, :68:42] wire _io_out_d_self_rec_rawIn_out_sig_T = ~io_out_d_self_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :70:19] wire [1:0] _io_out_d_self_rec_rawIn_out_sig_T_1 = {1'h0, _io_out_d_self_rec_rawIn_out_sig_T}; // @[rawFloatFromFN.scala:70:{16,19}] wire [22:0] _io_out_d_self_rec_rawIn_out_sig_T_2 = io_out_d_self_rec_rawIn_isZeroExpIn ? io_out_d_self_rec_rawIn_subnormFract : io_out_d_self_rec_rawIn_fractIn; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33] assign _io_out_d_self_rec_rawIn_out_sig_T_3 = {_io_out_d_self_rec_rawIn_out_sig_T_1, _io_out_d_self_rec_rawIn_out_sig_T_2}; // @[rawFloatFromFN.scala:70:{16,27,33}] assign io_out_d_self_rec_rawIn_sig = _io_out_d_self_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:63:19, :70:27] wire [2:0] _io_out_d_self_rec_T = io_out_d_self_rec_rawIn_sExp[8:6]; // @[recFNFromFN.scala:48:50] wire [2:0] _io_out_d_self_rec_T_1 = io_out_d_self_rec_rawIn_isZero_0 ? 3'h0 : _io_out_d_self_rec_T; // @[recFNFromFN.scala:48:{15,50}] wire [2:0] _io_out_d_self_rec_T_3 = {_io_out_d_self_rec_T_1[2:1], _io_out_d_self_rec_T_1[0] | _io_out_d_self_rec_T_2}; // @[recFNFromFN.scala:48:{15,76}, :49:20] wire [3:0] _io_out_d_self_rec_T_4 = {io_out_d_self_rec_rawIn_sign_0, _io_out_d_self_rec_T_3}; // @[recFNFromFN.scala:47:20, :48:76] wire [5:0] _io_out_d_self_rec_T_5 = io_out_d_self_rec_rawIn_sExp[5:0]; // @[recFNFromFN.scala:50:23] wire [9:0] _io_out_d_self_rec_T_6 = {_io_out_d_self_rec_T_4, _io_out_d_self_rec_T_5}; // @[recFNFromFN.scala:47:20, :49:45, :50:23] wire [22:0] _io_out_d_self_rec_T_7 = io_out_d_self_rec_rawIn_sig[22:0]; // @[recFNFromFN.scala:51:22] wire [32:0] io_out_d_self_rec = {_io_out_d_self_rec_T_6, _io_out_d_self_rec_T_7}; // @[recFNFromFN.scala:49:45, :50:41, :51:22] wire [31:0] _io_out_d_out_bits_T; // @[fNFromRecFN.scala:66:12] assign io_out_d_bits_0 = io_out_d_out_bits; // @[PE.scala:14:7] wire [8:0] io_out_d_out_bits_rawIn_exp = _io_out_d_muladder_io_out[31:23]; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _io_out_d_out_bits_rawIn_isZero_T = io_out_d_out_bits_rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28] wire io_out_d_out_bits_rawIn_isZero = _io_out_d_out_bits_rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}] wire io_out_d_out_bits_rawIn_isZero_0 = io_out_d_out_bits_rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23] wire [1:0] _io_out_d_out_bits_rawIn_isSpecial_T = io_out_d_out_bits_rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28] wire io_out_d_out_bits_rawIn_isSpecial = &_io_out_d_out_bits_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}] wire _io_out_d_out_bits_rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33] wire _io_out_d_out_bits_rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33] wire _io_out_d_out_bits_rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25] wire [9:0] _io_out_d_out_bits_rawIn_out_sExp_T; // 
@[rawFloatFromRecFN.scala:60:27] wire [24:0] _io_out_d_out_bits_rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44] wire io_out_d_out_bits_rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire io_out_d_out_bits_rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23] wire io_out_d_out_bits_rawIn_sign; // @[rawFloatFromRecFN.scala:55:23] wire [9:0] io_out_d_out_bits_rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23] wire [24:0] io_out_d_out_bits_rawIn_sig; // @[rawFloatFromRecFN.scala:55:23] wire _io_out_d_out_bits_rawIn_out_isNaN_T = io_out_d_out_bits_rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41] wire _io_out_d_out_bits_rawIn_out_isInf_T = io_out_d_out_bits_rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41] assign _io_out_d_out_bits_rawIn_out_isNaN_T_1 = io_out_d_out_bits_rawIn_isSpecial & _io_out_d_out_bits_rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}] assign io_out_d_out_bits_rawIn_isNaN = _io_out_d_out_bits_rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33] wire _io_out_d_out_bits_rawIn_out_isInf_T_1 = ~_io_out_d_out_bits_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}] assign _io_out_d_out_bits_rawIn_out_isInf_T_2 = io_out_d_out_bits_rawIn_isSpecial & _io_out_d_out_bits_rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}] assign io_out_d_out_bits_rawIn_isInf = _io_out_d_out_bits_rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33] assign _io_out_d_out_bits_rawIn_out_sign_T = _io_out_d_muladder_io_out[32]; // @[rawFloatFromRecFN.scala:59:25] assign io_out_d_out_bits_rawIn_sign = _io_out_d_out_bits_rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25] assign _io_out_d_out_bits_rawIn_out_sExp_T = {1'h0, io_out_d_out_bits_rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27] assign io_out_d_out_bits_rawIn_sExp = _io_out_d_out_bits_rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _io_out_d_out_bits_rawIn_out_sig_T = ~io_out_d_out_bits_rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35] wire [1:0] _io_out_d_out_bits_rawIn_out_sig_T_1 = {1'h0, _io_out_d_out_bits_rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}] wire [22:0] _io_out_d_out_bits_rawIn_out_sig_T_2 = _io_out_d_muladder_io_out[22:0]; // @[rawFloatFromRecFN.scala:61:49] assign _io_out_d_out_bits_rawIn_out_sig_T_3 = {_io_out_d_out_bits_rawIn_out_sig_T_1, _io_out_d_out_bits_rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}] assign io_out_d_out_bits_rawIn_sig = _io_out_d_out_bits_rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44] wire io_out_d_out_bits_isSubnormal = $signed(io_out_d_out_bits_rawIn_sExp) < 10'sh82; // @[rawFloatFromRecFN.scala:55:23] wire [4:0] _io_out_d_out_bits_denormShiftDist_T = io_out_d_out_bits_rawIn_sExp[4:0]; // @[rawFloatFromRecFN.scala:55:23] wire [5:0] _io_out_d_out_bits_denormShiftDist_T_1 = 6'h1 - {1'h0, _io_out_d_out_bits_denormShiftDist_T}; // @[fNFromRecFN.scala:52:{35,47}] wire [4:0] io_out_d_out_bits_denormShiftDist = _io_out_d_out_bits_denormShiftDist_T_1[4:0]; // @[fNFromRecFN.scala:52:35] wire [23:0] _io_out_d_out_bits_denormFract_T = io_out_d_out_bits_rawIn_sig[24:1]; // @[rawFloatFromRecFN.scala:55:23] wire [23:0] _io_out_d_out_bits_denormFract_T_1 = _io_out_d_out_bits_denormFract_T >> io_out_d_out_bits_denormShiftDist; // @[fNFromRecFN.scala:52:35, :53:{38,42}] wire [22:0] io_out_d_out_bits_denormFract = _io_out_d_out_bits_denormFract_T_1[22:0]; // @[fNFromRecFN.scala:53:{42,60}] wire [7:0] _io_out_d_out_bits_expOut_T = 
io_out_d_out_bits_rawIn_sExp[7:0]; // @[rawFloatFromRecFN.scala:55:23] wire [8:0] _io_out_d_out_bits_expOut_T_1 = {1'h0, _io_out_d_out_bits_expOut_T} - 9'h81; // @[fNFromRecFN.scala:58:{27,45}] wire [7:0] _io_out_d_out_bits_expOut_T_2 = _io_out_d_out_bits_expOut_T_1[7:0]; // @[fNFromRecFN.scala:58:45] wire [7:0] _io_out_d_out_bits_expOut_T_3 = io_out_d_out_bits_isSubnormal ? 8'h0 : _io_out_d_out_bits_expOut_T_2; // @[fNFromRecFN.scala:51:38, :56:16, :58:45] wire _io_out_d_out_bits_expOut_T_4 = io_out_d_out_bits_rawIn_isNaN | io_out_d_out_bits_rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23] wire [7:0] _io_out_d_out_bits_expOut_T_5 = {8{_io_out_d_out_bits_expOut_T_4}}; // @[fNFromRecFN.scala:60:{21,44}] wire [7:0] io_out_d_out_bits_expOut = _io_out_d_out_bits_expOut_T_3 | _io_out_d_out_bits_expOut_T_5; // @[fNFromRecFN.scala:56:16, :60:{15,21}] wire [22:0] _io_out_d_out_bits_fractOut_T = io_out_d_out_bits_rawIn_sig[22:0]; // @[rawFloatFromRecFN.scala:55:23] wire [22:0] _io_out_d_out_bits_fractOut_T_1 = io_out_d_out_bits_rawIn_isInf ? 23'h0 : _io_out_d_out_bits_fractOut_T; // @[rawFloatFromRecFN.scala:55:23] wire [22:0] io_out_d_out_bits_fractOut = io_out_d_out_bits_isSubnormal ? io_out_d_out_bits_denormFract : _io_out_d_out_bits_fractOut_T_1; // @[fNFromRecFN.scala:51:38, :53:60, :62:16, :64:20] wire [8:0] io_out_d_out_bits_hi = {io_out_d_out_bits_rawIn_sign, io_out_d_out_bits_expOut}; // @[rawFloatFromRecFN.scala:55:23] assign _io_out_d_out_bits_T = {io_out_d_out_bits_hi, io_out_d_out_bits_fractOut}; // @[fNFromRecFN.scala:62:16, :66:12] assign io_out_d_out_bits = _io_out_d_out_bits_T; // @[fNFromRecFN.scala:66:12] RecFNToRecFN_198 io_out_d_m1_resizer ( // @[Arithmetic.scala:362:32] .io_in (io_out_d_m1_rec), // @[recFNFromFN.scala:50:41] .io_out (_io_out_d_m1_resizer_io_out) ); // @[Arithmetic.scala:362:32] RecFNToRecFN_199 io_out_d_m2_resizer ( // @[Arithmetic.scala:369:32] .io_in (io_out_d_m2_rec), // @[recFNFromFN.scala:50:41] .io_out (_io_out_d_m2_resizer_io_out) ); // @[Arithmetic.scala:369:32] MulAddRecFN_e8_s24_73 io_out_d_muladder ( // @[Arithmetic.scala:376:30] .io_a (_io_out_d_m1_resizer_io_out), // @[Arithmetic.scala:362:32] .io_b (_io_out_d_m2_resizer_io_out), // @[Arithmetic.scala:369:32] .io_c (io_out_d_self_rec), // @[recFNFromFN.scala:50:41] .io_out (_io_out_d_muladder_io_out) ); // @[Arithmetic.scala:376:30] assign io_out_d_bits = io_out_d_bits_0; // @[PE.scala:14:7] endmodule
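(Editorial aside, not part of the generated Verilog.) MacUnit_9 above recodes its three 32-bit IEEE-754 inputs, resizes a and b, and drives a single MulAddRecFN instance, so it appears to implement a fused d = a*b + c with one final rounding in round-to-nearest-even. A minimal software reference sketch under that assumption is given below; it relies on Java's correctly rounded Math.fma (Java 9+), the names are hypothetical, and NaN bit patterns may not match the hardware output exactly.

object MacUnitReferenceSketch {
  // d = fma(a, b, c) on raw single-precision bit patterns, mirroring the
  // assumed behaviour of io_out_d_bits in the module above.
  def macBits(aBits: Int, bBits: Int, cBits: Int): Int = {
    val a = java.lang.Float.intBitsToFloat(aBits)
    val b = java.lang.Float.intBitsToFloat(bBits)
    val c = java.lang.Float.intBitsToFloat(cBits)
    java.lang.Float.floatToRawIntBits(java.lang.Math.fma(a, b, c))
  }
  def main(args: Array[String]): Unit = {
    def toBits(x: Float): Int = java.lang.Float.floatToRawIntBits(x)
    val d = macBits(toBits(1.5f), toBits(2.0f), toBits(0.25f))
    println(java.lang.Float.intBitsToFloat(d)) // expect 3.25
  }
}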
Generate the Verilog code corresponding to the following Chisel files. File primitives.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object lowMask { def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt = { require(topBound != bottomBound) val numInVals = BigInt(1)<<in.getWidth if (topBound < bottomBound) { lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound) } else if (numInVals > 64 /* Empirical */) { // For simulation performance, we should avoid generating // exteremely wide shifters, so we divide and conquer. // Empirically, this does not impact synthesis QoR. 
val mid = numInVals / 2 val msb = in(in.getWidth - 1) val lsbs = in(in.getWidth - 2, 0) if (mid < topBound) { if (mid <= bottomBound) { Mux(msb, lowMask(lsbs, topBound - mid, bottomBound - mid), 0.U ) } else { Mux(msb, lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U, lowMask(lsbs, mid, bottomBound) ) } } else { ~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound)) } } else { val shift = (BigInt(-1)<<numInVals.toInt).S>>in Reverse( shift( (numInVals - 1 - bottomBound).toInt, (numInVals - topBound).toInt ) ) } } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object countLeadingZeros { def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse) } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object orReduceBy2 { def apply(in: UInt): UInt = { val reducedWidth = (in.getWidth + 1)>>1 val reducedVec = Wire(Vec(reducedWidth, Bool())) for (ix <- 0 until reducedWidth - 1) { reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR } reducedVec(reducedWidth - 1) := in(in.getWidth - 1, (reducedWidth - 1) * 2).orR reducedVec.asUInt } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object orReduceBy4 { def apply(in: UInt): UInt = { val reducedWidth = (in.getWidth + 3)>>2 val reducedVec = Wire(Vec(reducedWidth, Bool())) for (ix <- 0 until reducedWidth - 1) { reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR } reducedVec(reducedWidth - 1) := in(in.getWidth - 1, (reducedWidth - 1) * 4).orR reducedVec.asUInt } } File MulAddRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class MulAddRecFN_interIo(expWidth: Int, sigWidth: Int) extends Bundle { //*** ENCODE SOME OF THESE CASES IN FEWER BITS?: val isSigNaNAny = Bool() val isNaNAOrB = Bool() val isInfA = Bool() val isZeroA = Bool() val isInfB = Bool() val isZeroB = Bool() val signProd = Bool() val isNaNC = Bool() val isInfC = Bool() val isZeroC = Bool() val sExpSum = SInt((expWidth + 2).W) val doSubMags = Bool() val CIsDominant = Bool() val CDom_CAlignDist = UInt(log2Ceil(sigWidth + 1).W) val highAlignedSigC = UInt((sigWidth + 2).W) val bit0AlignedSigC = UInt(1.W) } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class MulAddRecFNToRaw_preMul(expWidth: Int, sigWidth: Int) extends RawModule { override def desiredName = s"MulAddRecFNToRaw_preMul_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val op = Input(Bits(2.W)) val a = Input(Bits((expWidth + sigWidth + 1).W)) val b = Input(Bits((expWidth + sigWidth + 1).W)) val c = Input(Bits((expWidth + sigWidth + 1).W)) val mulAddA = Output(UInt(sigWidth.W)) val mulAddB = Output(UInt(sigWidth.W)) val mulAddC = Output(UInt((sigWidth * 2).W)) val toPostMul = Output(new MulAddRecFN_interIo(expWidth, sigWidth)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ //*** POSSIBLE TO REDUCE THIS BY 1 OR 2 BITS? (CURRENTLY 2 BITS BETWEEN //*** UNSHIFTED C AND PRODUCT): val sigSumWidth = sigWidth * 3 + 3 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a) val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b) val rawC = rawFloatFromRecFN(expWidth, sigWidth, io.c) val signProd = rawA.sign ^ rawB.sign ^ io.op(1) //*** REVIEW THE BIAS FOR 'sExpAlignedProd': val sExpAlignedProd = rawA.sExp +& rawB.sExp + (-(BigInt(1)<<expWidth) + sigWidth + 3).S val doSubMags = signProd ^ rawC.sign ^ io.op(0) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sNatCAlignDist = sExpAlignedProd - rawC.sExp val posNatCAlignDist = sNatCAlignDist(expWidth + 1, 0) val isMinCAlign = rawA.isZero || rawB.isZero || (sNatCAlignDist < 0.S) val CIsDominant = ! 
rawC.isZero && (isMinCAlign || (posNatCAlignDist <= sigWidth.U)) val CAlignDist = Mux(isMinCAlign, 0.U, Mux(posNatCAlignDist < (sigSumWidth - 1).U, posNatCAlignDist(log2Ceil(sigSumWidth) - 1, 0), (sigSumWidth - 1).U ) ) val mainAlignedSigC = (Mux(doSubMags, ~rawC.sig, rawC.sig) ## Fill(sigSumWidth - sigWidth + 2, doSubMags)).asSInt>>CAlignDist val reduced4CExtra = (orReduceBy4(rawC.sig<<((sigSumWidth - sigWidth - 1) & 3)) & lowMask( CAlignDist>>2, //*** NOT NEEDED?: // (sigSumWidth + 2)>>2, (sigSumWidth - 1)>>2, (sigSumWidth - sigWidth - 1)>>2 ) ).orR val alignedSigC = Cat(mainAlignedSigC>>3, Mux(doSubMags, mainAlignedSigC(2, 0).andR && ! reduced4CExtra, mainAlignedSigC(2, 0).orR || reduced4CExtra ) ) //------------------------------------------------------------------------ //------------------------------------------------------------------------ io.mulAddA := rawA.sig io.mulAddB := rawB.sig io.mulAddC := alignedSigC(sigWidth * 2, 1) io.toPostMul.isSigNaNAny := isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) || isSigNaNRawFloat(rawC) io.toPostMul.isNaNAOrB := rawA.isNaN || rawB.isNaN io.toPostMul.isInfA := rawA.isInf io.toPostMul.isZeroA := rawA.isZero io.toPostMul.isInfB := rawB.isInf io.toPostMul.isZeroB := rawB.isZero io.toPostMul.signProd := signProd io.toPostMul.isNaNC := rawC.isNaN io.toPostMul.isInfC := rawC.isInf io.toPostMul.isZeroC := rawC.isZero io.toPostMul.sExpSum := Mux(CIsDominant, rawC.sExp, sExpAlignedProd - sigWidth.S) io.toPostMul.doSubMags := doSubMags io.toPostMul.CIsDominant := CIsDominant io.toPostMul.CDom_CAlignDist := CAlignDist(log2Ceil(sigWidth + 1) - 1, 0) io.toPostMul.highAlignedSigC := alignedSigC(sigSumWidth - 1, sigWidth * 2 + 1) io.toPostMul.bit0AlignedSigC := alignedSigC(0) } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class MulAddRecFNToRaw_postMul(expWidth: Int, sigWidth: Int) extends RawModule { override def desiredName = s"MulAddRecFNToRaw_postMul_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val fromPreMul = Input(new MulAddRecFN_interIo(expWidth, sigWidth)) val mulAddResult = Input(UInt((sigWidth * 2 + 1).W)) val roundingMode = Input(UInt(3.W)) val invalidExc = Output(Bool()) val rawOut = Output(new RawFloat(expWidth, sigWidth + 2)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sigSumWidth = sigWidth * 3 + 3 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundingMode_min = (io.roundingMode === round_min) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val opSignC = io.fromPreMul.signProd ^ io.fromPreMul.doSubMags val sigSum = Cat(Mux(io.mulAddResult(sigWidth * 2), io.fromPreMul.highAlignedSigC + 1.U, io.fromPreMul.highAlignedSigC ), io.mulAddResult(sigWidth * 2 - 1, 0), io.fromPreMul.bit0AlignedSigC ) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val CDom_sign = opSignC val CDom_sExp = io.fromPreMul.sExpSum - io.fromPreMul.doSubMags.zext val CDom_absSigSum = Mux(io.fromPreMul.doSubMags, ~sigSum(sigSumWidth - 1, sigWidth + 1), 0.U(1.W) ## //*** IF GAP IS REDUCED TO 1 BIT, MUST REDUCE THIS COMPONENT TO 1 BIT TOO: 
io.fromPreMul.highAlignedSigC(sigWidth + 1, sigWidth) ## sigSum(sigSumWidth - 3, sigWidth + 2) ) val CDom_absSigSumExtra = Mux(io.fromPreMul.doSubMags, (~sigSum(sigWidth, 1)).orR, sigSum(sigWidth + 1, 1).orR ) val CDom_mainSig = (CDom_absSigSum<<io.fromPreMul.CDom_CAlignDist)( sigWidth * 2 + 1, sigWidth - 3) val CDom_reduced4SigExtra = (orReduceBy4(CDom_absSigSum(sigWidth - 1, 0)<<(~sigWidth & 3)) & lowMask(io.fromPreMul.CDom_CAlignDist>>2, 0, sigWidth>>2)).orR val CDom_sig = Cat(CDom_mainSig>>3, CDom_mainSig(2, 0).orR || CDom_reduced4SigExtra || CDom_absSigSumExtra ) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val notCDom_signSigSum = sigSum(sigWidth * 2 + 3) val notCDom_absSigSum = Mux(notCDom_signSigSum, ~sigSum(sigWidth * 2 + 2, 0), sigSum(sigWidth * 2 + 2, 0) + io.fromPreMul.doSubMags ) val notCDom_reduced2AbsSigSum = orReduceBy2(notCDom_absSigSum) val notCDom_normDistReduced2 = countLeadingZeros(notCDom_reduced2AbsSigSum) val notCDom_nearNormDist = notCDom_normDistReduced2<<1 val notCDom_sExp = io.fromPreMul.sExpSum - notCDom_nearNormDist.asUInt.zext val notCDom_mainSig = (notCDom_absSigSum<<notCDom_nearNormDist)( sigWidth * 2 + 3, sigWidth - 1) val notCDom_reduced4SigExtra = (orReduceBy2( notCDom_reduced2AbsSigSum(sigWidth>>1, 0)<<((sigWidth>>1) & 1)) & lowMask(notCDom_normDistReduced2>>1, 0, (sigWidth + 2)>>2) ).orR val notCDom_sig = Cat(notCDom_mainSig>>3, notCDom_mainSig(2, 0).orR || notCDom_reduced4SigExtra ) val notCDom_completeCancellation = (notCDom_sig(sigWidth + 2, sigWidth + 1) === 0.U) val notCDom_sign = Mux(notCDom_completeCancellation, roundingMode_min, io.fromPreMul.signProd ^ notCDom_signSigSum ) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val notNaN_isInfProd = io.fromPreMul.isInfA || io.fromPreMul.isInfB val notNaN_isInfOut = notNaN_isInfProd || io.fromPreMul.isInfC val notNaN_addZeros = (io.fromPreMul.isZeroA || io.fromPreMul.isZeroB) && io.fromPreMul.isZeroC io.invalidExc := io.fromPreMul.isSigNaNAny || (io.fromPreMul.isInfA && io.fromPreMul.isZeroB) || (io.fromPreMul.isZeroA && io.fromPreMul.isInfB) || (! io.fromPreMul.isNaNAOrB && (io.fromPreMul.isInfA || io.fromPreMul.isInfB) && io.fromPreMul.isInfC && io.fromPreMul.doSubMags) io.rawOut.isNaN := io.fromPreMul.isNaNAOrB || io.fromPreMul.isNaNC io.rawOut.isInf := notNaN_isInfOut //*** IMPROVE?: io.rawOut.isZero := notNaN_addZeros || (! io.fromPreMul.CIsDominant && notCDom_completeCancellation) io.rawOut.sign := (notNaN_isInfProd && io.fromPreMul.signProd) || (io.fromPreMul.isInfC && opSignC) || (notNaN_addZeros && ! roundingMode_min && io.fromPreMul.signProd && opSignC) || (notNaN_addZeros && roundingMode_min && (io.fromPreMul.signProd || opSignC)) || (! notNaN_isInfOut && ! 
notNaN_addZeros && Mux(io.fromPreMul.CIsDominant, CDom_sign, notCDom_sign)) io.rawOut.sExp := Mux(io.fromPreMul.CIsDominant, CDom_sExp, notCDom_sExp) io.rawOut.sig := Mux(io.fromPreMul.CIsDominant, CDom_sig, notCDom_sig) } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class MulAddRecFN(expWidth: Int, sigWidth: Int) extends RawModule { override def desiredName = s"MulAddRecFN_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val op = Input(Bits(2.W)) val a = Input(Bits((expWidth + sigWidth + 1).W)) val b = Input(Bits((expWidth + sigWidth + 1).W)) val c = Input(Bits((expWidth + sigWidth + 1).W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val mulAddRecFNToRaw_preMul = Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth)) val mulAddRecFNToRaw_postMul = Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth)) mulAddRecFNToRaw_preMul.io.op := io.op mulAddRecFNToRaw_preMul.io.a := io.a mulAddRecFNToRaw_preMul.io.b := io.b mulAddRecFNToRaw_preMul.io.c := io.c val mulAddResult = (mulAddRecFNToRaw_preMul.io.mulAddA * mulAddRecFNToRaw_preMul.io.mulAddB) +& mulAddRecFNToRaw_preMul.io.mulAddC mulAddRecFNToRaw_postMul.io.fromPreMul := mulAddRecFNToRaw_preMul.io.toPostMul mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult mulAddRecFNToRaw_postMul.io.roundingMode := io.roundingMode //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundRawFNToRecFN = Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0)) roundRawFNToRecFN.io.invalidExc := mulAddRecFNToRaw_postMul.io.invalidExc roundRawFNToRecFN.io.infiniteExc := false.B roundRawFNToRecFN.io.in := mulAddRecFNToRaw_postMul.io.rawOut roundRawFNToRecFN.io.roundingMode := io.roundingMode roundRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundRawFNToRecFN.io.out io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags } File rawFloatFromRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ /*---------------------------------------------------------------------------- | In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be | set. *----------------------------------------------------------------------------*/ object rawFloatFromRecFN { def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat = { val exp = in(expWidth + sigWidth - 1, sigWidth - 1) val isZero = exp(expWidth, expWidth - 2) === 0.U val isSpecial = exp(expWidth, expWidth - 1) === 3.U val out = Wire(new RawFloat(expWidth, sigWidth)) out.isNaN := isSpecial && exp(expWidth - 2) out.isInf := isSpecial && ! exp(expWidth - 2) out.isZero := isZero out.sign := in(expWidth + sigWidth) out.sExp := exp.zext out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0) out } }
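The SystemVerilog module that follows corresponds to elaborating MulAddRecFNToRaw_preMul with expWidth = 5 and sigWidth = 11 (recoded half precision). As a minimal sketch only -- assuming a CIRCT-based Chisel toolchain (Chisel 3.6 or newer) with the hardfloat sources above on the classpath; the driver object name is illustrative -- the elaboration could be driven like this:

// Illustrative elaboration driver, not part of the hardfloat sources above.
// Emits SystemVerilog for the e5_s11 configuration shown below; the exact
// formatting of the emitted module depends on the firtool options in use.
import circt.stage.ChiselStage
import hardfloat.MulAddRecFNToRaw_preMul

object EmitPreMul_e5_s11 extends App {
  println(ChiselStage.emitSystemVerilog(new MulAddRecFNToRaw_preMul(expWidth = 5, sigWidth = 11)))
}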
module MulAddRecFNToRaw_preMul_e5_s11( // @[MulAddRecFN.scala:71:7] input [1:0] io_op, // @[MulAddRecFN.scala:74:16] input [16:0] io_a, // @[MulAddRecFN.scala:74:16] input [16:0] io_b, // @[MulAddRecFN.scala:74:16] input [16:0] io_c, // @[MulAddRecFN.scala:74:16] output [10:0] io_mulAddA, // @[MulAddRecFN.scala:74:16] output [10:0] io_mulAddB, // @[MulAddRecFN.scala:74:16] output [21:0] io_mulAddC, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isSigNaNAny, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isNaNAOrB, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isInfA, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isZeroA, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isInfB, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isZeroB, // @[MulAddRecFN.scala:74:16] output io_toPostMul_signProd, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isNaNC, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isInfC, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isZeroC, // @[MulAddRecFN.scala:74:16] output [6:0] io_toPostMul_sExpSum, // @[MulAddRecFN.scala:74:16] output io_toPostMul_doSubMags, // @[MulAddRecFN.scala:74:16] output io_toPostMul_CIsDominant, // @[MulAddRecFN.scala:74:16] output [3:0] io_toPostMul_CDom_CAlignDist, // @[MulAddRecFN.scala:74:16] output [12:0] io_toPostMul_highAlignedSigC, // @[MulAddRecFN.scala:74:16] output io_toPostMul_bit0AlignedSigC // @[MulAddRecFN.scala:74:16] ); wire [1:0] io_op_0 = io_op; // @[MulAddRecFN.scala:71:7] wire [16:0] io_a_0 = io_a; // @[MulAddRecFN.scala:71:7] wire [16:0] io_b_0 = io_b; // @[MulAddRecFN.scala:71:7] wire [16:0] io_c_0 = io_c; // @[MulAddRecFN.scala:71:7] wire [21:0] _io_mulAddC_T; // @[MulAddRecFN.scala:143:30] wire _io_toPostMul_isSigNaNAny_T_10; // @[MulAddRecFN.scala:146:58] wire _io_toPostMul_isNaNAOrB_T; // @[MulAddRecFN.scala:148:42] wire rawA_isInf; // @[rawFloatFromRecFN.scala:55:23] wire rawA_isZero; // @[rawFloatFromRecFN.scala:55:23] wire rawB_isInf; // @[rawFloatFromRecFN.scala:55:23] wire rawB_isZero; // @[rawFloatFromRecFN.scala:55:23] wire signProd; // @[MulAddRecFN.scala:97:42] wire rawC_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire rawC_isInf; // @[rawFloatFromRecFN.scala:55:23] wire rawC_isZero; // @[rawFloatFromRecFN.scala:55:23] wire doSubMags; // @[MulAddRecFN.scala:102:42] wire CIsDominant; // @[MulAddRecFN.scala:110:23] wire [3:0] _io_toPostMul_CDom_CAlignDist_T; // @[MulAddRecFN.scala:161:47] wire [12:0] _io_toPostMul_highAlignedSigC_T; // @[MulAddRecFN.scala:163:20] wire _io_toPostMul_bit0AlignedSigC_T; // @[MulAddRecFN.scala:164:48] wire io_toPostMul_isSigNaNAny_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_isNaNAOrB_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_isInfA_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_isZeroA_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_isInfB_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_isZeroB_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_signProd_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_isNaNC_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_isInfC_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_isZeroC_0; // @[MulAddRecFN.scala:71:7] wire [6:0] io_toPostMul_sExpSum_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_doSubMags_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_CIsDominant_0; // @[MulAddRecFN.scala:71:7] wire [3:0] io_toPostMul_CDom_CAlignDist_0; // @[MulAddRecFN.scala:71:7] wire [12:0] io_toPostMul_highAlignedSigC_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_bit0AlignedSigC_0; // 
@[MulAddRecFN.scala:71:7] wire [10:0] io_mulAddA_0; // @[MulAddRecFN.scala:71:7] wire [10:0] io_mulAddB_0; // @[MulAddRecFN.scala:71:7] wire [21:0] io_mulAddC_0; // @[MulAddRecFN.scala:71:7] wire [5:0] rawA_exp = io_a_0[15:10]; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _rawA_isZero_T = rawA_exp[5:3]; // @[rawFloatFromRecFN.scala:51:21, :52:28] wire rawA_isZero_0 = _rawA_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}] assign rawA_isZero = rawA_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :55:23] wire [1:0] _rawA_isSpecial_T = rawA_exp[5:4]; // @[rawFloatFromRecFN.scala:51:21, :53:28] wire rawA_isSpecial = &_rawA_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}] wire _rawA_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33] wire _rawA_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33] assign io_toPostMul_isInfA_0 = rawA_isInf; // @[rawFloatFromRecFN.scala:55:23] assign io_toPostMul_isZeroA_0 = rawA_isZero; // @[rawFloatFromRecFN.scala:55:23] wire _rawA_out_sign_T; // @[rawFloatFromRecFN.scala:59:25] wire [6:0] _rawA_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27] wire [11:0] _rawA_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44] wire rawA_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire rawA_sign; // @[rawFloatFromRecFN.scala:55:23] wire [6:0] rawA_sExp; // @[rawFloatFromRecFN.scala:55:23] wire [11:0] rawA_sig; // @[rawFloatFromRecFN.scala:55:23] wire _rawA_out_isNaN_T = rawA_exp[3]; // @[rawFloatFromRecFN.scala:51:21, :56:41] wire _rawA_out_isInf_T = rawA_exp[3]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41] assign _rawA_out_isNaN_T_1 = rawA_isSpecial & _rawA_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}] assign rawA_isNaN = _rawA_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33] wire _rawA_out_isInf_T_1 = ~_rawA_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}] assign _rawA_out_isInf_T_2 = rawA_isSpecial & _rawA_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}] assign rawA_isInf = _rawA_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33] assign _rawA_out_sign_T = io_a_0[16]; // @[rawFloatFromRecFN.scala:59:25] assign rawA_sign = _rawA_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25] assign _rawA_out_sExp_T = {1'h0, rawA_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27] assign rawA_sExp = _rawA_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _rawA_out_sig_T = ~rawA_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :61:35] wire [1:0] _rawA_out_sig_T_1 = {1'h0, _rawA_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}] wire [9:0] _rawA_out_sig_T_2 = io_a_0[9:0]; // @[rawFloatFromRecFN.scala:61:49] assign _rawA_out_sig_T_3 = {_rawA_out_sig_T_1, _rawA_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}] assign rawA_sig = _rawA_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44] wire [5:0] rawB_exp = io_b_0[15:10]; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _rawB_isZero_T = rawB_exp[5:3]; // @[rawFloatFromRecFN.scala:51:21, :52:28] wire rawB_isZero_0 = _rawB_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}] assign rawB_isZero = rawB_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :55:23] wire [1:0] _rawB_isSpecial_T = rawB_exp[5:4]; // @[rawFloatFromRecFN.scala:51:21, :53:28] wire rawB_isSpecial = &_rawB_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}] wire _rawB_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33] wire _rawB_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33] assign io_toPostMul_isInfB_0 = rawB_isInf; // @[rawFloatFromRecFN.scala:55:23] assign io_toPostMul_isZeroB_0 
= rawB_isZero; // @[rawFloatFromRecFN.scala:55:23] wire _rawB_out_sign_T; // @[rawFloatFromRecFN.scala:59:25] wire [6:0] _rawB_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27] wire [11:0] _rawB_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44] wire rawB_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire rawB_sign; // @[rawFloatFromRecFN.scala:55:23] wire [6:0] rawB_sExp; // @[rawFloatFromRecFN.scala:55:23] wire [11:0] rawB_sig; // @[rawFloatFromRecFN.scala:55:23] wire _rawB_out_isNaN_T = rawB_exp[3]; // @[rawFloatFromRecFN.scala:51:21, :56:41] wire _rawB_out_isInf_T = rawB_exp[3]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41] assign _rawB_out_isNaN_T_1 = rawB_isSpecial & _rawB_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}] assign rawB_isNaN = _rawB_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33] wire _rawB_out_isInf_T_1 = ~_rawB_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}] assign _rawB_out_isInf_T_2 = rawB_isSpecial & _rawB_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}] assign rawB_isInf = _rawB_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33] assign _rawB_out_sign_T = io_b_0[16]; // @[rawFloatFromRecFN.scala:59:25] assign rawB_sign = _rawB_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25] assign _rawB_out_sExp_T = {1'h0, rawB_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27] assign rawB_sExp = _rawB_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _rawB_out_sig_T = ~rawB_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :61:35] wire [1:0] _rawB_out_sig_T_1 = {1'h0, _rawB_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}] wire [9:0] _rawB_out_sig_T_2 = io_b_0[9:0]; // @[rawFloatFromRecFN.scala:61:49] assign _rawB_out_sig_T_3 = {_rawB_out_sig_T_1, _rawB_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}] assign rawB_sig = _rawB_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44] wire [5:0] rawC_exp = io_c_0[15:10]; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _rawC_isZero_T = rawC_exp[5:3]; // @[rawFloatFromRecFN.scala:51:21, :52:28] wire rawC_isZero_0 = _rawC_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}] assign rawC_isZero = rawC_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :55:23] wire [1:0] _rawC_isSpecial_T = rawC_exp[5:4]; // @[rawFloatFromRecFN.scala:51:21, :53:28] wire rawC_isSpecial = &_rawC_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}] wire _rawC_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33] assign io_toPostMul_isNaNC_0 = rawC_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire _rawC_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33] assign io_toPostMul_isInfC_0 = rawC_isInf; // @[rawFloatFromRecFN.scala:55:23] assign io_toPostMul_isZeroC_0 = rawC_isZero; // @[rawFloatFromRecFN.scala:55:23] wire _rawC_out_sign_T; // @[rawFloatFromRecFN.scala:59:25] wire [6:0] _rawC_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27] wire [11:0] _rawC_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44] wire rawC_sign; // @[rawFloatFromRecFN.scala:55:23] wire [6:0] rawC_sExp; // @[rawFloatFromRecFN.scala:55:23] wire [11:0] rawC_sig; // @[rawFloatFromRecFN.scala:55:23] wire [11:0] _reduced4CExtra_T = rawC_sig; // @[rawFloatFromRecFN.scala:55:23] wire _rawC_out_isNaN_T = rawC_exp[3]; // @[rawFloatFromRecFN.scala:51:21, :56:41] wire _rawC_out_isInf_T = rawC_exp[3]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41] assign _rawC_out_isNaN_T_1 = rawC_isSpecial & _rawC_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}] assign rawC_isNaN = _rawC_out_isNaN_T_1; // 
@[rawFloatFromRecFN.scala:55:23, :56:33] wire _rawC_out_isInf_T_1 = ~_rawC_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}] assign _rawC_out_isInf_T_2 = rawC_isSpecial & _rawC_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}] assign rawC_isInf = _rawC_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33] assign _rawC_out_sign_T = io_c_0[16]; // @[rawFloatFromRecFN.scala:59:25] assign rawC_sign = _rawC_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25] assign _rawC_out_sExp_T = {1'h0, rawC_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27] assign rawC_sExp = _rawC_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _rawC_out_sig_T = ~rawC_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :61:35] wire [1:0] _rawC_out_sig_T_1 = {1'h0, _rawC_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}] wire [9:0] _rawC_out_sig_T_2 = io_c_0[9:0]; // @[rawFloatFromRecFN.scala:61:49] assign _rawC_out_sig_T_3 = {_rawC_out_sig_T_1, _rawC_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}] assign rawC_sig = _rawC_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44] wire _signProd_T = rawA_sign ^ rawB_sign; // @[rawFloatFromRecFN.scala:55:23] wire _signProd_T_1 = io_op_0[1]; // @[MulAddRecFN.scala:71:7, :97:49] assign signProd = _signProd_T ^ _signProd_T_1; // @[MulAddRecFN.scala:97:{30,42,49}] assign io_toPostMul_signProd_0 = signProd; // @[MulAddRecFN.scala:71:7, :97:42] wire [7:0] _sExpAlignedProd_T = {rawA_sExp[6], rawA_sExp} + {rawB_sExp[6], rawB_sExp}; // @[rawFloatFromRecFN.scala:55:23] wire [8:0] _sExpAlignedProd_T_1 = {_sExpAlignedProd_T[7], _sExpAlignedProd_T} - 9'h12; // @[MulAddRecFN.scala:100:{19,32}] wire [7:0] _sExpAlignedProd_T_2 = _sExpAlignedProd_T_1[7:0]; // @[MulAddRecFN.scala:100:32] wire [7:0] sExpAlignedProd = _sExpAlignedProd_T_2; // @[MulAddRecFN.scala:100:32] wire _doSubMags_T = signProd ^ rawC_sign; // @[rawFloatFromRecFN.scala:55:23] wire _doSubMags_T_1 = io_op_0[0]; // @[MulAddRecFN.scala:71:7, :102:49] assign doSubMags = _doSubMags_T ^ _doSubMags_T_1; // @[MulAddRecFN.scala:102:{30,42,49}] assign io_toPostMul_doSubMags_0 = doSubMags; // @[MulAddRecFN.scala:71:7, :102:42] wire [8:0] _GEN = {sExpAlignedProd[7], sExpAlignedProd}; // @[MulAddRecFN.scala:100:32, :106:42] wire [8:0] _sNatCAlignDist_T = _GEN - {{2{rawC_sExp[6]}}, rawC_sExp}; // @[rawFloatFromRecFN.scala:55:23] wire [7:0] _sNatCAlignDist_T_1 = _sNatCAlignDist_T[7:0]; // @[MulAddRecFN.scala:106:42] wire [7:0] sNatCAlignDist = _sNatCAlignDist_T_1; // @[MulAddRecFN.scala:106:42] wire [6:0] posNatCAlignDist = sNatCAlignDist[6:0]; // @[MulAddRecFN.scala:106:42, :107:42] wire _isMinCAlign_T = rawA_isZero | rawB_isZero; // @[rawFloatFromRecFN.scala:55:23] wire _isMinCAlign_T_1 = $signed(sNatCAlignDist) < 8'sh0; // @[MulAddRecFN.scala:106:42, :108:69] wire isMinCAlign = _isMinCAlign_T | _isMinCAlign_T_1; // @[MulAddRecFN.scala:108:{35,50,69}] wire _CIsDominant_T = ~rawC_isZero; // @[rawFloatFromRecFN.scala:55:23] wire _CIsDominant_T_1 = posNatCAlignDist < 7'hC; // @[MulAddRecFN.scala:107:42, :110:60] wire _CIsDominant_T_2 = isMinCAlign | _CIsDominant_T_1; // @[MulAddRecFN.scala:108:50, :110:{39,60}] assign CIsDominant = _CIsDominant_T & _CIsDominant_T_2; // @[MulAddRecFN.scala:110:{9,23,39}] assign io_toPostMul_CIsDominant_0 = CIsDominant; // @[MulAddRecFN.scala:71:7, :110:23] wire _CAlignDist_T = posNatCAlignDist < 7'h23; // @[MulAddRecFN.scala:107:42, :114:34] wire [5:0] _CAlignDist_T_1 = posNatCAlignDist[5:0]; // @[MulAddRecFN.scala:107:42, :115:33] wire [5:0] _CAlignDist_T_2 = 
_CAlignDist_T ? _CAlignDist_T_1 : 6'h23; // @[MulAddRecFN.scala:114:{16,34}, :115:33] wire [5:0] CAlignDist = isMinCAlign ? 6'h0 : _CAlignDist_T_2; // @[MulAddRecFN.scala:108:50, :112:12, :114:16] wire [11:0] _mainAlignedSigC_T = ~rawC_sig; // @[rawFloatFromRecFN.scala:55:23] wire [11:0] _mainAlignedSigC_T_1 = doSubMags ? _mainAlignedSigC_T : rawC_sig; // @[rawFloatFromRecFN.scala:55:23] wire [26:0] _mainAlignedSigC_T_2 = {27{doSubMags}}; // @[MulAddRecFN.scala:102:42, :120:53] wire [38:0] _mainAlignedSigC_T_3 = {_mainAlignedSigC_T_1, _mainAlignedSigC_T_2}; // @[MulAddRecFN.scala:120:{13,46,53}] wire [38:0] _mainAlignedSigC_T_4 = _mainAlignedSigC_T_3; // @[MulAddRecFN.scala:120:{46,94}] wire [38:0] mainAlignedSigC = $signed($signed(_mainAlignedSigC_T_4) >>> CAlignDist); // @[MulAddRecFN.scala:112:12, :120:{94,100}] wire _reduced4CExtra_reducedVec_0_T_1; // @[primitives.scala:120:54] wire _reduced4CExtra_reducedVec_1_T_1; // @[primitives.scala:120:54] wire _reduced4CExtra_reducedVec_2_T_1; // @[primitives.scala:123:57] wire reduced4CExtra_reducedVec_0; // @[primitives.scala:118:30] wire reduced4CExtra_reducedVec_1; // @[primitives.scala:118:30] wire reduced4CExtra_reducedVec_2; // @[primitives.scala:118:30] wire [3:0] _reduced4CExtra_reducedVec_0_T = _reduced4CExtra_T[3:0]; // @[primitives.scala:120:33] assign _reduced4CExtra_reducedVec_0_T_1 = |_reduced4CExtra_reducedVec_0_T; // @[primitives.scala:120:{33,54}] assign reduced4CExtra_reducedVec_0 = _reduced4CExtra_reducedVec_0_T_1; // @[primitives.scala:118:30, :120:54] wire [3:0] _reduced4CExtra_reducedVec_1_T = _reduced4CExtra_T[7:4]; // @[primitives.scala:120:33] assign _reduced4CExtra_reducedVec_1_T_1 = |_reduced4CExtra_reducedVec_1_T; // @[primitives.scala:120:{33,54}] assign reduced4CExtra_reducedVec_1 = _reduced4CExtra_reducedVec_1_T_1; // @[primitives.scala:118:30, :120:54] wire [3:0] _reduced4CExtra_reducedVec_2_T = _reduced4CExtra_T[11:8]; // @[primitives.scala:123:15] assign _reduced4CExtra_reducedVec_2_T_1 = |_reduced4CExtra_reducedVec_2_T; // @[primitives.scala:123:{15,57}] assign reduced4CExtra_reducedVec_2 = _reduced4CExtra_reducedVec_2_T_1; // @[primitives.scala:118:30, :123:57] wire [1:0] reduced4CExtra_hi = {reduced4CExtra_reducedVec_2, reduced4CExtra_reducedVec_1}; // @[primitives.scala:118:30, :124:20] wire [2:0] _reduced4CExtra_T_1 = {reduced4CExtra_hi, reduced4CExtra_reducedVec_0}; // @[primitives.scala:118:30, :124:20] wire [3:0] _reduced4CExtra_T_2 = CAlignDist[5:2]; // @[MulAddRecFN.scala:112:12, :124:28] wire [16:0] reduced4CExtra_shift = $signed(17'sh10000 >>> _reduced4CExtra_T_2); // @[primitives.scala:76:56] wire [1:0] _reduced4CExtra_T_3 = reduced4CExtra_shift[9:8]; // @[primitives.scala:76:56, :78:22] wire _reduced4CExtra_T_4 = _reduced4CExtra_T_3[0]; // @[primitives.scala:77:20, :78:22] wire _reduced4CExtra_T_5 = _reduced4CExtra_T_3[1]; // @[primitives.scala:77:20, :78:22] wire [1:0] _reduced4CExtra_T_6 = {_reduced4CExtra_T_4, _reduced4CExtra_T_5}; // @[primitives.scala:77:20] wire [2:0] _reduced4CExtra_T_7 = {1'h0, _reduced4CExtra_T_1[1:0] & _reduced4CExtra_T_6}; // @[primitives.scala:77:20, :124:20] wire reduced4CExtra = |_reduced4CExtra_T_7; // @[MulAddRecFN.scala:122:68, :130:11] wire [35:0] _alignedSigC_T = mainAlignedSigC[38:3]; // @[MulAddRecFN.scala:120:100, :132:28] wire [35:0] alignedSigC_hi = _alignedSigC_T; // @[MulAddRecFN.scala:132:{12,28}] wire [2:0] _alignedSigC_T_1 = mainAlignedSigC[2:0]; // @[MulAddRecFN.scala:120:100, :134:32] wire [2:0] _alignedSigC_T_5 = mainAlignedSigC[2:0]; // 
@[MulAddRecFN.scala:120:100, :134:32, :135:32] wire _alignedSigC_T_2 = &_alignedSigC_T_1; // @[MulAddRecFN.scala:134:{32,39}] wire _alignedSigC_T_3 = ~reduced4CExtra; // @[MulAddRecFN.scala:130:11, :134:47] wire _alignedSigC_T_4 = _alignedSigC_T_2 & _alignedSigC_T_3; // @[MulAddRecFN.scala:134:{39,44,47}] wire _alignedSigC_T_6 = |_alignedSigC_T_5; // @[MulAddRecFN.scala:135:{32,39}] wire _alignedSigC_T_7 = _alignedSigC_T_6 | reduced4CExtra; // @[MulAddRecFN.scala:130:11, :135:{39,44}] wire _alignedSigC_T_8 = doSubMags ? _alignedSigC_T_4 : _alignedSigC_T_7; // @[MulAddRecFN.scala:102:42, :133:16, :134:44, :135:44] wire [36:0] alignedSigC = {alignedSigC_hi, _alignedSigC_T_8}; // @[MulAddRecFN.scala:132:12, :133:16] assign io_mulAddA_0 = rawA_sig[10:0]; // @[rawFloatFromRecFN.scala:55:23] assign io_mulAddB_0 = rawB_sig[10:0]; // @[rawFloatFromRecFN.scala:55:23] assign _io_mulAddC_T = alignedSigC[22:1]; // @[MulAddRecFN.scala:132:12, :143:30] assign io_mulAddC_0 = _io_mulAddC_T; // @[MulAddRecFN.scala:71:7, :143:30] wire _io_toPostMul_isSigNaNAny_T = rawA_sig[9]; // @[rawFloatFromRecFN.scala:55:23] wire _io_toPostMul_isSigNaNAny_T_1 = ~_io_toPostMul_isSigNaNAny_T; // @[common.scala:82:{49,56}] wire _io_toPostMul_isSigNaNAny_T_2 = rawA_isNaN & _io_toPostMul_isSigNaNAny_T_1; // @[rawFloatFromRecFN.scala:55:23] wire _io_toPostMul_isSigNaNAny_T_3 = rawB_sig[9]; // @[rawFloatFromRecFN.scala:55:23] wire _io_toPostMul_isSigNaNAny_T_4 = ~_io_toPostMul_isSigNaNAny_T_3; // @[common.scala:82:{49,56}] wire _io_toPostMul_isSigNaNAny_T_5 = rawB_isNaN & _io_toPostMul_isSigNaNAny_T_4; // @[rawFloatFromRecFN.scala:55:23] wire _io_toPostMul_isSigNaNAny_T_6 = _io_toPostMul_isSigNaNAny_T_2 | _io_toPostMul_isSigNaNAny_T_5; // @[common.scala:82:46] wire _io_toPostMul_isSigNaNAny_T_7 = rawC_sig[9]; // @[rawFloatFromRecFN.scala:55:23] wire _io_toPostMul_isSigNaNAny_T_8 = ~_io_toPostMul_isSigNaNAny_T_7; // @[common.scala:82:{49,56}] wire _io_toPostMul_isSigNaNAny_T_9 = rawC_isNaN & _io_toPostMul_isSigNaNAny_T_8; // @[rawFloatFromRecFN.scala:55:23] assign _io_toPostMul_isSigNaNAny_T_10 = _io_toPostMul_isSigNaNAny_T_6 | _io_toPostMul_isSigNaNAny_T_9; // @[common.scala:82:46] assign io_toPostMul_isSigNaNAny_0 = _io_toPostMul_isSigNaNAny_T_10; // @[MulAddRecFN.scala:71:7, :146:58] assign _io_toPostMul_isNaNAOrB_T = rawA_isNaN | rawB_isNaN; // @[rawFloatFromRecFN.scala:55:23] assign io_toPostMul_isNaNAOrB_0 = _io_toPostMul_isNaNAOrB_T; // @[MulAddRecFN.scala:71:7, :148:42] wire [8:0] _io_toPostMul_sExpSum_T = _GEN - 9'hB; // @[MulAddRecFN.scala:106:42, :158:53] wire [7:0] _io_toPostMul_sExpSum_T_1 = _io_toPostMul_sExpSum_T[7:0]; // @[MulAddRecFN.scala:158:53] wire [7:0] _io_toPostMul_sExpSum_T_2 = _io_toPostMul_sExpSum_T_1; // @[MulAddRecFN.scala:158:53] wire [7:0] _io_toPostMul_sExpSum_T_3 = CIsDominant ? 
{rawC_sExp[6], rawC_sExp} : _io_toPostMul_sExpSum_T_2; // @[rawFloatFromRecFN.scala:55:23] assign io_toPostMul_sExpSum_0 = _io_toPostMul_sExpSum_T_3[6:0]; // @[MulAddRecFN.scala:71:7, :157:28, :158:12] assign _io_toPostMul_CDom_CAlignDist_T = CAlignDist[3:0]; // @[MulAddRecFN.scala:112:12, :161:47] assign io_toPostMul_CDom_CAlignDist_0 = _io_toPostMul_CDom_CAlignDist_T; // @[MulAddRecFN.scala:71:7, :161:47] assign _io_toPostMul_highAlignedSigC_T = alignedSigC[35:23]; // @[MulAddRecFN.scala:132:12, :163:20] assign io_toPostMul_highAlignedSigC_0 = _io_toPostMul_highAlignedSigC_T; // @[MulAddRecFN.scala:71:7, :163:20] assign _io_toPostMul_bit0AlignedSigC_T = alignedSigC[0]; // @[MulAddRecFN.scala:132:12, :164:48] assign io_toPostMul_bit0AlignedSigC_0 = _io_toPostMul_bit0AlignedSigC_T; // @[MulAddRecFN.scala:71:7, :164:48] assign io_mulAddA = io_mulAddA_0; // @[MulAddRecFN.scala:71:7] assign io_mulAddB = io_mulAddB_0; // @[MulAddRecFN.scala:71:7] assign io_mulAddC = io_mulAddC_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isSigNaNAny = io_toPostMul_isSigNaNAny_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isNaNAOrB = io_toPostMul_isNaNAOrB_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isInfA = io_toPostMul_isInfA_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isZeroA = io_toPostMul_isZeroA_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isInfB = io_toPostMul_isInfB_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isZeroB = io_toPostMul_isZeroB_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_signProd = io_toPostMul_signProd_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isNaNC = io_toPostMul_isNaNC_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isInfC = io_toPostMul_isInfC_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isZeroC = io_toPostMul_isZeroC_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_sExpSum = io_toPostMul_sExpSum_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_doSubMags = io_toPostMul_doSubMags_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_CIsDominant = io_toPostMul_CIsDominant_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_CDom_CAlignDist = io_toPostMul_CDom_CAlignDist_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_highAlignedSigC = io_toPostMul_highAlignedSigC_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_bit0AlignedSigC = io_toPostMul_bit0AlignedSigC_0; // @[MulAddRecFN.scala:71:7] endmodule
Generate the Verilog code corresponding to the following Chisel files. File MSHR.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import freechips.rocketchip.tilelink._ import TLPermissions._ import TLMessages._ import MetaData._ import chisel3.PrintableHelper import chisel3.experimental.dataview._ class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val a = Valid(new SourceARequest(params)) val b = Valid(new SourceBRequest(params)) val c = Valid(new SourceCRequest(params)) val d = Valid(new SourceDRequest(params)) val e = Valid(new SourceERequest(params)) val x = Valid(new SourceXRequest(params)) val dir = Valid(new DirectoryWrite(params)) val reload = Bool() // get next request via allocate (if any) } class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val way = UInt(params.wayBits.W) val blockB = Bool() val nestB = Bool() val blockC = Bool() val nestC = Bool() } class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val b_toN = Bool() // nested Probes may unhit us val b_toB = Bool() // nested Probes may demote us val b_clr_dirty = Bool() // nested Probes clear dirty val c_set_dirty = Bool() // nested Releases MAY set dirty } sealed trait CacheState { val code = CacheState.index.U CacheState.index = CacheState.index + 1 } object CacheState { var index = 0 } case object S_INVALID extends CacheState case object S_BRANCH extends CacheState case object S_BRANCH_C extends CacheState case object S_TIP extends CacheState case object S_TIP_C extends CacheState case object S_TIP_CD extends CacheState case object S_TIP_D extends CacheState case object S_TRUNK_C extends CacheState case object S_TRUNK_CD extends CacheState class MSHR(params: InclusiveCacheParameters) extends Module { val io = IO(new Bundle { val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup val status = Valid(new MSHRStatus(params)) val schedule = Decoupled(new ScheduleRequest(params)) val sinkc = Flipped(Valid(new SinkCResponse(params))) val sinkd = Flipped(Valid(new SinkDResponse(params))) val sinke = Flipped(Valid(new SinkEResponse(params))) val nestedwb = Flipped(new NestedWriteback(params)) }) val request_valid = RegInit(false.B) val request = Reg(new FullRequest(params)) val meta_valid = RegInit(false.B) val meta = Reg(new DirectoryResult(params)) // Define which states are valid when (meta_valid) { when (meta.state === INVALID) { assert (!meta.clients.orR) assert (!meta.dirty) } when (meta.state === BRANCH) { assert (!meta.dirty) } when 
(meta.state === TRUNK) { assert (meta.clients.orR) assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one } when (meta.state === TIP) { // noop } } // Completed transitions (s_ = scheduled), (w_ = waiting) val s_rprobe = RegInit(true.B) // B val w_rprobeackfirst = RegInit(true.B) val w_rprobeacklast = RegInit(true.B) val s_release = RegInit(true.B) // CW w_rprobeackfirst val w_releaseack = RegInit(true.B) val s_pprobe = RegInit(true.B) // B val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1] val s_flush = RegInit(true.B) // X w_releaseack val w_grantfirst = RegInit(true.B) val w_grantlast = RegInit(true.B) val w_grant = RegInit(true.B) // first | last depending on wormhole val w_pprobeackfirst = RegInit(true.B) val w_pprobeacklast = RegInit(true.B) val w_pprobeack = RegInit(true.B) // first | last depending on wormhole val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*) val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD val s_execute = RegInit(true.B) // D w_pprobeack, w_grant val w_grantack = RegInit(true.B) val s_writeback = RegInit(true.B) // W w_* // [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall) // However, inB and outC are higher priority than outB, so s_release and s_pprobe // may be safely issued while blockB. Thus we must NOT try to schedule the // potentially stuck s_acquire with either of them (scheduler is all or none). // Meta-data that we discover underway val sink = Reg(UInt(params.outer.bundle.sinkBits.W)) val gotT = Reg(Bool()) val bad_grant = Reg(Bool()) val probes_done = Reg(UInt(params.clientBits.W)) val probes_toN = Reg(UInt(params.clientBits.W)) val probes_noT = Reg(Bool()) // When a nested transaction completes, update our meta data when (meta_valid && meta.state =/= INVALID && io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) { when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B } when (io.nestedwb.c_set_dirty) { meta.dirty := true.B } when (io.nestedwb.b_toB) { meta.state := BRANCH } when (io.nestedwb.b_toN) { meta.hit := false.B } } // Scheduler status io.status.valid := request_valid io.status.bits.set := request.set io.status.bits.tag := request.tag io.status.bits.way := meta.way io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst) io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst // The above rules ensure we will block and not nest an outer probe while still doing our // own inner probes. Thus every probe wakes exactly one MSHR. io.status.bits.blockC := !meta_valid io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst) // The w_grantfirst in nestC is necessary to deal with: // acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock // ... 
this is possible because the release+probe can be for same set, but different tag // We can only demand: block, nest, or queue assert (!io.status.bits.nestB || !io.status.bits.blockB) assert (!io.status.bits.nestC || !io.status.bits.blockC) // Scheduler requests val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe io.schedule.bits.b.valid := !s_rprobe || !s_pprobe io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst) io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant io.schedule.bits.e.valid := !s_grantack && w_grantfirst io.schedule.bits.x.valid := !s_flush && w_releaseack io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait) io.schedule.bits.reload := no_wait io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid || io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid || io.schedule.bits.dir.valid // Schedule completions when (io.schedule.ready) { s_rprobe := true.B when (w_rprobeackfirst) { s_release := true.B } s_pprobe := true.B when (s_release && s_pprobe) { s_acquire := true.B } when (w_releaseack) { s_flush := true.B } when (w_pprobeackfirst) { s_probeack := true.B } when (w_grantfirst) { s_grantack := true.B } when (w_pprobeack && w_grant) { s_execute := true.B } when (no_wait) { s_writeback := true.B } // Await the next operation when (no_wait) { request_valid := false.B meta_valid := false.B } } // Resulting meta-data val final_meta_writeback = WireInit(meta) val req_clientBit = params.clientBit(request.source) val req_needT = needT(request.opcode, request.param) val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm val meta_no_clients = !meta.clients.orR val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT) when (request.prio(2) && (!params.firstLevel).B) { // always a hit final_meta_writeback.dirty := meta.dirty || request.opcode(0) final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state) final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U) final_meta_writeback.hit := true.B // chained requests are hits } .elsewhen (request.control && params.control.B) { // request.prio(0) when (meta.hit) { final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := meta.clients & ~probes_toN } final_meta_writeback.hit := false.B } .otherwise { final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2) final_meta_writeback.state := Mux(req_needT, Mux(req_acquire, TRUNK, TIP), Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH), MuxLookup(meta.state, 0.U(2.W))(Seq( INVALID -> BRANCH, BRANCH -> BRANCH, TRUNK -> TIP, TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP))))) final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) | Mux(req_acquire, req_clientBit, 0.U) final_meta_writeback.tag := request.tag final_meta_writeback.hit := true.B } when (bad_grant) { when (meta.hit) { // upgrade failed (B -> T) assert (!meta_valid || meta.state === BRANCH) final_meta_writeback.hit := true.B final_meta_writeback.dirty := false.B final_meta_writeback.state := BRANCH final_meta_writeback.clients := meta.clients & ~probes_toN } .otherwise { // failed N -> (T or B) 
final_meta_writeback.hit := false.B final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := 0.U } } val invalid = Wire(new DirectoryEntry(params)) invalid.dirty := false.B invalid.state := INVALID invalid.clients := 0.U invalid.tag := 0.U // Just because a client says BtoT, by the time we process the request he may be N. // Therefore, we must consult our own meta-data state to confirm he owns the line still. val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR // The client asking us to act is proof they don't have permissions. val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U) io.schedule.bits.a.bits.tag := request.tag io.schedule.bits.a.bits.set := request.set io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB) io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U || !(request.opcode === PutFullData || request.opcode === AcquirePerm) io.schedule.bits.a.bits.source := 0.U io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB))) io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag) io.schedule.bits.b.bits.set := request.set io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release) io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN) io.schedule.bits.c.bits.source := 0.U io.schedule.bits.c.bits.tag := meta.tag io.schedule.bits.c.bits.set := request.set io.schedule.bits.c.bits.way := meta.way io.schedule.bits.c.bits.dirty := meta.dirty io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param, MuxLookup(request.param, request.param)(Seq( NtoB -> Mux(req_promoteT, NtoT, NtoB), BtoT -> Mux(honour_BtoT, BtoT, NtoT), NtoT -> NtoT))) io.schedule.bits.d.bits.sink := 0.U io.schedule.bits.d.bits.way := meta.way io.schedule.bits.d.bits.bad := bad_grant io.schedule.bits.e.bits.sink := sink io.schedule.bits.x.bits.fail := false.B io.schedule.bits.dir.bits.set := request.set io.schedule.bits.dir.bits.way := meta.way io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback)) // Coverage of state transitions def cacheState(entry: DirectoryEntry, hit: Bool) = { val out = WireDefault(0.U) val c = entry.clients.orR val d = entry.dirty switch (entry.state) { is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) } is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) } is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) } is (INVALID) { out := S_INVALID.code } } when (!hit) { out := S_INVALID.code } out } val p = !params.lastLevel // can be probed val c = !params.firstLevel // can be acquired val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read) val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist val f = params.control // flush control register exists val cfg = (p, c, m, r, f) val b = r || p // can reach branch state (via probe downgrade or read-only device) // The cache must be used for something or we would not be here require(c || m) val evict = cacheState(meta, !meta.hit) val before = cacheState(meta, meta.hit) val after = cacheState(final_meta_writeback, 
true.B) def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}") } else { assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}") } if (cover && f) { params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}") } else { assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}") } } def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}") } else { assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}") } } when ((!s_release && w_rprobeackfirst) && io.schedule.ready) { eviction(S_BRANCH, b) // MMIO read to read-only device eviction(S_BRANCH_C, b && c) // you need children to become C eviction(S_TIP, true) // MMIO read || clean release can lead to this state eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_D, true) // MMIO write || dirty release lead here eviction(S_TRUNK_C, c) // acquire for write eviction(S_TRUNK_CD, c) // dirty release then reacquire } when ((!s_writeback && no_wait) && io.schedule.ready) { transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches transition(S_INVALID, S_TIP, m) // MMIO read transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_INVALID, S_TIP_D, m) // MMIO write transition(S_INVALID, S_TRUNK_C, c) // acquire transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions) transition(S_BRANCH, S_BRANCH_C, b && c) // acquire transition(S_BRANCH, S_TIP, b && m) // prefetch write transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_TIP_D, b && m) // MMIO write transition(S_BRANCH, S_TRUNK_C, b && c) // acquire transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH_C, S_INVALID, b && c && p) transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional) transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_TIP, S_INVALID, p) transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write transition(S_TIP, 
S_TIP_CD, false) // acquire does not make us dirty immediately transition(S_TIP, S_TRUNK_C, c) // acquire transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately transition(S_TIP_C, S_INVALID, c && p) transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional) transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_TIP_C, S_TRUNK_C, c) // acquire transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty transition(S_TIP_D, S_INVALID, p) transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired transition(S_TIP_D, S_TRUNK_CD, c) // acquire transition(S_TIP_CD, S_INVALID, c && p) transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional) transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire transition(S_TIP_CD, S_TRUNK_CD, c) // acquire transition(S_TRUNK_C, S_INVALID, c && p) transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional) transition(S_TRUNK_C, S_TIP_C, c) // bounce shared transition(S_TRUNK_C, S_TIP_D, c) // dirty release transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce transition(S_TRUNK_CD, S_INVALID, c && p) transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TRUNK_CD, S_TIP_D, c) // dirty release transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire } // Handle response messages val probe_bit = params.clientBit(io.sinkc.bits.source) val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client) val probe_toN = isToN(io.sinkc.bits.param) if (!params.firstLevel) when (io.sinkc.valid) { params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B") params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B") // Caution: the probe matches us only in set. // We would never allow an outer probe to nest until both w_[rp]probeack complete, so // it is safe to just unguardedly update the probe FSM. 
    probes_done := probes_done | probe_bit
    probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U)
    probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT
    w_rprobeackfirst := w_rprobeackfirst || last_probe
    w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last)
    w_pprobeackfirst := w_pprobeackfirst || last_probe
    w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last)
    // Allow wormhole routing from sinkC if the first request beat has offset 0
    val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U)
    w_pprobeack := w_pprobeack || set_pprobeack
    params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data")
    params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data")
    // However, meta-data updates need to be done more cautiously
    when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!!
  }

  when (io.sinkd.valid) {
    when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) {
      sink := io.sinkd.bits.sink
      w_grantfirst := true.B
      w_grantlast := io.sinkd.bits.last
      // Record if we need to prevent taking ownership
      bad_grant := io.sinkd.bits.denied
      // Allow wormhole routing for requests whose first beat has offset 0
      w_grant := request.offset === 0.U || io.sinkd.bits.last
      params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data")
      params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data")
      gotT := io.sinkd.bits.param === toT
    } .elsewhen (io.sinkd.bits.opcode === ReleaseAck) {
      w_releaseack := true.B
    }
  }

  when (io.sinke.valid) {
    w_grantack := true.B
  }

  // Bootstrap new requests
  val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits)
  val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits)
  val new_request = Mux(io.allocate.valid, allocate_as_full, request)
  val new_needT = needT(new_request.opcode, new_request.param)
  val new_clientBit = params.clientBit(new_request.source)
  val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U)

  val prior = cacheState(final_meta_writeback, true.B)
  def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) {
    if (cover) {
      params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}")
    } else {
      assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}")
    }
  }

  when (io.allocate.valid && io.allocate.bits.repeat) {
    bypass(S_INVALID,  f || p) // Can lose permissions (probe/flush)
    bypass(S_BRANCH,   b)      // MMIO read to read-only device
    bypass(S_BRANCH_C, b && c) // you need children to become C
    bypass(S_TIP,      true)   // MMIO read || clean release can lead to this state
    bypass(S_TIP_C,    c)      // needs two clients || client + mmio || downgrading client
    bypass(S_TIP_CD,   c)      // needs two clients || client + mmio || downgrading client
    bypass(S_TIP_D,    true)   // MMIO write || dirty release lead here
    bypass(S_TRUNK_C,  c)      // acquire for write
    bypass(S_TRUNK_CD, c)      // dirty release then reacquire
  }

  when (io.allocate.valid) {
    assert (!request_valid || (no_wait && io.schedule.fire))
    request_valid := true.B
    request := io.allocate.bits
  }

  // Create execution plan
  when (io.directory.valid || (io.allocate.valid && io.allocate.bits.repeat)) {
    meta_valid := true.B
    meta := new_meta
    probes_done := 0.U
    probes_toN := 0.U
    probes_noT := false.B
    gotT := false.B
    bad_grant := false.B

    // These should already be either true or turning true
    // We clear them here explicitly to simplify the mux tree
    s_rprobe         := true.B
    w_rprobeackfirst := true.B
    w_rprobeacklast  := true.B
    s_release        := true.B
    w_releaseack     := true.B
    s_pprobe         := true.B
    s_acquire        := true.B
    s_flush          := true.B
    w_grantfirst     := true.B
    w_grantlast      := true.B
    w_grant          := true.B
    w_pprobeackfirst := true.B
    w_pprobeacklast  := true.B
    w_pprobeack      := true.B
    s_probeack       := true.B
    s_grantack       := true.B
    s_execute        := true.B
    w_grantack       := true.B
    s_writeback      := true.B

    // For C channel requests (ie: Release[Data])
    when (new_request.prio(2) && (!params.firstLevel).B) {
      s_execute := false.B
      // Do we need to go dirty?
      when (new_request.opcode(0) && !new_meta.dirty) {
        s_writeback := false.B
      }
      // Does our state change?
      when (isToB(new_request.param) && new_meta.state === TRUNK) {
        s_writeback := false.B
      }
      // Do our clients change?
      when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) {
        s_writeback := false.B
      }
      assert (new_meta.hit)
    }
    // For X channel requests (ie: flush)
    .elsewhen (new_request.control && params.control.B) { // new_request.prio(0)
      s_flush := false.B
      // Do we need to actually do something?
      when (new_meta.hit) {
        s_release := false.B
        w_releaseack := false.B
        // Do we need to shoot-down inner caches?
        when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
          s_rprobe := false.B
          w_rprobeackfirst := false.B
          w_rprobeacklast := false.B
        }
      }
    }
    // For A channel requests
    .otherwise { // new_request.prio(0) && !new_request.control
      s_execute := false.B
      // Do we need an eviction?
      when (!new_meta.hit && new_meta.state =/= INVALID) {
        s_release := false.B
        w_releaseack := false.B
        // Do we need to shoot-down inner caches?
        when ((!params.firstLevel).B & (new_meta.clients =/= 0.U)) {
          s_rprobe := false.B
          w_rprobeackfirst := false.B
          w_rprobeacklast := false.B
        }
      }
      // Do we need an acquire?
      when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) {
        s_acquire := false.B
        w_grantfirst := false.B
        w_grantlast := false.B
        w_grant := false.B
        s_grantack := false.B
        s_writeback := false.B
      }
      // Do we need a probe?
      when ((!params.firstLevel).B && (new_meta.hit && (new_needT || new_meta.state === TRUNK) && (new_meta.clients & ~new_skipProbe) =/= 0.U)) {
        s_pprobe := false.B
        w_pprobeackfirst := false.B
        w_pprobeacklast := false.B
        w_pprobeack := false.B
        s_writeback := false.B
      }
      // Do we need a grantack?
      when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) {
        w_grantack := false.B
        s_writeback := false.B
      }
      // Becomes dirty?
      when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) {
        s_writeback := false.B
      }
    }
  }
}
File Parameters.scala:
/*
 * Copyright 2019 SiFive, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You should have received a copy of LICENSE.Apache2 along with
 * this software. If not, you may obtain a copy at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package sifive.blocks.inclusivecache

import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property.cover
import scala.math.{min,max}

case class CacheParameters(
  level:      Int,
  ways:       Int,
  sets:       Int,
  blockBytes: Int,
  beatBytes:  Int, // inner
  hintsSkipProbe: Boolean)
{
  require (ways > 0)
  require (sets > 0)
  require (blockBytes > 0 && isPow2(blockBytes))
  require (beatBytes > 0 && isPow2(beatBytes))
  require (blockBytes >= beatBytes)

  val blocks = ways * sets
  val sizeBytes = blocks * blockBytes
  val blockBeats = blockBytes/beatBytes
}

case class InclusiveCachePortParameters(
  a: BufferParams,
  b: BufferParams,
  c: BufferParams,
  d: BufferParams,
  e: BufferParams)
{
  def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e))
}

object InclusiveCachePortParameters
{
  val none = InclusiveCachePortParameters(
    a = BufferParams.none,
    b = BufferParams.none,
    c = BufferParams.none,
    d = BufferParams.none,
    e = BufferParams.none)
  val full = InclusiveCachePortParameters(
    a = BufferParams.default,
    b = BufferParams.default,
    c = BufferParams.default,
    d = BufferParams.default,
    e = BufferParams.default)

  // This removes feed-through paths from C=>A and A=>C
  val fullC = InclusiveCachePortParameters(
    a = BufferParams.none,
    b = BufferParams.none,
    c = BufferParams.default,
    d = BufferParams.none,
    e = BufferParams.none)

  val flowAD = InclusiveCachePortParameters(
    a = BufferParams.flow,
    b = BufferParams.none,
    c = BufferParams.none,
    d = BufferParams.flow,
    e = BufferParams.none)

  val flowAE = InclusiveCachePortParameters(
    a = BufferParams.flow,
    b = BufferParams.none,
    c = BufferParams.none,
    d = BufferParams.none,
    e = BufferParams.flow)

  // For innerBuf:
  //   SinkA:   no restrictions, flows into scheduler+putbuffer
  //   SourceB: no restrictions, flows out of scheduler
  //   sinkC:   no restrictions, flows into scheduler+putbuffer & buffered to bankedStore
  //   SourceD: no restrictions, flows out of bankedStore/regout
  //   SinkE:   no restrictions, flows into scheduler
  //
  // ... so while none is possible, you probably want at least flowAC to cut ready
  //     from the scheduler delay and flowD to ease SourceD back-pressure

  // For outerBuffer:
  //   SourceA: must not be pipe, flows out of scheduler
  //   SinkB:   no restrictions, flows into scheduler
  //   SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored
  //   SinkD:   no restrictions, flows into scheduler & bankedStore
  //   SourceE: must not be pipe, flows out of scheduler
  //
  // ... AE take the channel ready into the scheduler, so you need at least flowAE
}

case class InclusiveCacheMicroParameters(
  writeBytes: Int,      // backing store update granularity
  memCycles:  Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz)
  portFactor: Int = 4,  // numSubBanks = (widest TL port * portFactor) / writeBytes
  dirReg:     Boolean = false,
  innerBuf:   InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none
  outerBuf:   InclusiveCachePortParameters = InclusiveCachePortParameters.full)  // or flowAE
{
  require (writeBytes > 0 && isPow2(writeBytes))
  require (memCycles > 0)
  require (portFactor >= 2) // for inner RMW and concurrent outer Release + Grant
}

case class InclusiveCacheControlParameters(
  address: BigInt,
  beatBytes: Int,
  bankedControl: Boolean)

case class InclusiveCacheParameters(
  cache:   CacheParameters,
  micro:   InclusiveCacheMicroParameters,
  control: Boolean,
  inner:   TLEdgeIn,
  outer:   TLEdgeOut)(implicit val p: Parameters)
{
  require (cache.ways > 1)
  require (cache.sets > 1 && isPow2(cache.sets))
  require (micro.writeBytes <= inner.manager.beatBytes)
  require (micro.writeBytes <= outer.manager.beatBytes)
  require (inner.manager.beatBytes <= cache.blockBytes)
  require (outer.manager.beatBytes <= cache.blockBytes)

  // Require that all cached address ranges have contiguous blocks
  outer.manager.managers.flatMap(_.address).foreach { a =>
    require (a.alignment >= cache.blockBytes)
  }

  // If we are the first level cache, we do not need to support inner-BCE
  val firstLevel = !inner.client.clients.exists(_.supports.probe)
  // If we are the last level cache, we do not need to support outer-B
  val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED)
  require (lastLevel)

  // Provision enough resources to achieve full throughput with missing single-beat accesses
  val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro)
  val secondary = max(mshrs, micro.memCycles - mshrs)
  val putLists = micro.memCycles // allow every request to be single beat
  val putBeats = max(2*cache.blockBeats, micro.memCycles)
  val relLists = 2
  val relBeats = relLists*cache.blockBeats

  val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address))
  val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_))
  def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] =
    if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail)
  val addressMapping = bitOffsets(pickMask)
  val addressBits = addressMapping.size

  // println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}")

  val allClients = inner.client.clients.size
  val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size
  val clientBits = max(1, clientBitsRaw)
  val stateBits = 2
  val wayBits = log2Ceil(cache.ways)
  val setBits = log2Ceil(cache.sets)
  val offsetBits = log2Ceil(cache.blockBytes)
  val tagBits = addressBits - setBits - offsetBits
  val putBits = log2Ceil(max(putLists, relLists))

  require (tagBits > 0)
  require (offsetBits > 0)

  val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1
  val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1
  val innerMaskBits = inner.manager.beatBytes / micro.writeBytes
  val outerMaskBits = outer.manager.beatBytes / micro.writeBytes

  def clientBit(source: UInt): UInt = {
    if (clientBitsRaw == 0) {
      0.U
    } else {
      Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse)
    }
  }

  def clientSource(bit: UInt): UInt = {
    if (clientBitsRaw == 0) {
      0.U
    } else {
      Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U))
    }
  }

  def parseAddress(x: UInt): (UInt, UInt, UInt) = {
    val offset = Cat(addressMapping.map(o => x(o,o)).reverse)
    val set = offset >> offsetBits
    val tag = set >> setBits
    (tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0))
  }

  def widen(x: UInt, width: Int): UInt = {
    val y = x | 0.U(width.W)
    assert (y >> width === 0.U)
    y(width-1, 0)
  }

  def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = {
    val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits))
    val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) }
    addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) }
    Cat(bits.reverse)
  }

  def restoreAddress(expanded: UInt): UInt = {
    val missingBits = flatAddresses
      .map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match
      .groupBy(_._1)
      .view
      .mapValues(_.map(_._2))
    val muxMask = AddressDecoder(missingBits.values.toList)
    val mux = missingBits.toList.map { case (bits, addrs) =>
      val widen = addrs.map(_.widen(~muxMask))
      val matches = AddressSet
        .unify(widen.distinct)
        .map(_.contains(expanded))
        .reduce(_ || _)
      (matches, bits.U)
    }
    expanded | Mux1H(mux)
  }

  def dirReg[T <: Data](x: T, en: Bool = true.B): T = {
    if (micro.dirReg) RegEnable(x, en) else x
  }

  def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
    cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc)
}

object MetaData
{
  val stateBits = 2
  def INVALID: UInt = 0.U(stateBits.W) // way is empty
  def BRANCH:  UInt = 1.U(stateBits.W) // outer slave cache is trunk
  def TRUNK:   UInt = 2.U(stateBits.W) // unique inner master cache is trunk
  def TIP:     UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch

  // Does a request need trunk?
  def needT(opcode: UInt, param: UInt): Bool = {
    !opcode(2) ||
    (opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) ||
    ((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB)
  }

  // Does a request prove the client need not be probed?
  def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
    // Acquire(toB) and Get => is N, so no probe
    // Acquire(*toT)        => is N or B, but need T, so no probe
    // Hint                 => could be anything, so a probe IS needed; if hintsSkipProbe is enabled, skip probing the same client
    // Put*                 => is N or B, so probe IS needed
    opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm ||
    opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B)
  }

  def isToN(param: UInt): Bool = {
    param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
  }

  def isToB(param: UInt): Bool = {
    param === TLPermissions.TtoB || param === TLPermissions.BtoB
  }
}

object InclusiveCacheParameters
{
  val lfsrBits = 10
  val L2ControlAddress = 0x2010000
  val L2ControlSize = 0x1000

  def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
    // We need 2-3 normal MSHRs to cover the Directory latency
    // To fully exploit memory bandwidth-delay-product, we need memCycles/blockBeats MSHRs
    max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats)
  }

  def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
    // We need a dedicated MSHR for B+C each
    2 + out_mshrs(cache, micro)
}

class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
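The MSHR provisioning above is plain integer arithmetic, so it can be sanity-checked outside of the generator. The following standalone Scala sketch mirrors the out_mshrs/all_mshrs formulas; the parameter values used here (blockBytes = 64, beatBytes = 8, the default memCycles = 40, dirReg = false) are illustrative assumptions and are not claimed to match the configuration that produced the Verilog below.

// Plain-Scala sketch of the MSHR sizing formulas from InclusiveCacheParameters.
// Names and numbers below are illustrative only.
object MshrSizingSketch {
  // mirrors out_mshrs: max(2 or 3, ceil(memCycles / blockBeats))
  def outMshrs(blockBeats: Int, memCycles: Int, dirReg: Boolean): Int =
    math.max(if (dirReg) 3 else 2, (memCycles + blockBeats - 1) / blockBeats)

  // mirrors all_mshrs: one dedicated MSHR each for the B and C channels
  def allMshrs(blockBeats: Int, memCycles: Int, dirReg: Boolean): Int =
    2 + outMshrs(blockBeats, memCycles, dirReg)

  def main(args: Array[String]): Unit = {
    val blockBeats = 64 / 8 // blockBytes = 64, beatBytes = 8 (example values)
    val memCycles  = 40     // default in InclusiveCacheMicroParameters
    println(outMshrs(blockBeats, memCycles, dirReg = false)) // 5 = max(2, ceil(40/8))
    println(allMshrs(blockBeats, memCycles, dirReg = false)) // 7 = 2 + 5
  }
}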
module MSHR_49( // @[MSHR.scala:84:7] input clock, // @[MSHR.scala:84:7] input reset, // @[MSHR.scala:84:7] input io_allocate_valid, // @[MSHR.scala:86:14] input io_allocate_bits_prio_0, // @[MSHR.scala:86:14] input io_allocate_bits_prio_1, // @[MSHR.scala:86:14] input io_allocate_bits_prio_2, // @[MSHR.scala:86:14] input io_allocate_bits_control, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_source, // @[MSHR.scala:86:14] input [8:0] io_allocate_bits_tag, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14] input [10:0] io_allocate_bits_set, // @[MSHR.scala:86:14] input io_allocate_bits_repeat, // @[MSHR.scala:86:14] input io_directory_valid, // @[MSHR.scala:86:14] input io_directory_bits_dirty, // @[MSHR.scala:86:14] input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14] input io_directory_bits_clients, // @[MSHR.scala:86:14] input [8:0] io_directory_bits_tag, // @[MSHR.scala:86:14] input io_directory_bits_hit, // @[MSHR.scala:86:14] input [3:0] io_directory_bits_way, // @[MSHR.scala:86:14] output io_status_valid, // @[MSHR.scala:86:14] output [10:0] io_status_bits_set, // @[MSHR.scala:86:14] output [8:0] io_status_bits_tag, // @[MSHR.scala:86:14] output [3:0] io_status_bits_way, // @[MSHR.scala:86:14] output io_status_bits_blockB, // @[MSHR.scala:86:14] output io_status_bits_nestB, // @[MSHR.scala:86:14] output io_status_bits_blockC, // @[MSHR.scala:86:14] output io_status_bits_nestC, // @[MSHR.scala:86:14] input io_schedule_ready, // @[MSHR.scala:86:14] output io_schedule_valid, // @[MSHR.scala:86:14] output io_schedule_bits_a_valid, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14] output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14] output io_schedule_bits_b_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14] output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14] output io_schedule_bits_c_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14] output [3:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14] output io_schedule_bits_d_valid, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_0, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14] output [5:0] 
io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14] output [3:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14] output io_schedule_bits_e_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14] output io_schedule_bits_x_valid, // @[MSHR.scala:86:14] output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14] output [3:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14] output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14] output io_schedule_bits_reload, // @[MSHR.scala:86:14] input io_sinkc_valid, // @[MSHR.scala:86:14] input io_sinkc_bits_last, // @[MSHR.scala:86:14] input [10:0] io_sinkc_bits_set, // @[MSHR.scala:86:14] input [8:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14] input [5:0] io_sinkc_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14] input io_sinkc_bits_data, // @[MSHR.scala:86:14] input io_sinkd_valid, // @[MSHR.scala:86:14] input io_sinkd_bits_last, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14] input [3:0] io_sinkd_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14] input io_sinkd_bits_denied, // @[MSHR.scala:86:14] input io_sinke_valid, // @[MSHR.scala:86:14] input [3:0] io_sinke_bits_sink, // @[MSHR.scala:86:14] input [10:0] io_nestedwb_set, // @[MSHR.scala:86:14] input [8:0] io_nestedwb_tag, // @[MSHR.scala:86:14] input io_nestedwb_b_toN, // @[MSHR.scala:86:14] input io_nestedwb_b_toB, // @[MSHR.scala:86:14] input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14] input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14] ); wire [8:0] final_meta_writeback_tag; // @[MSHR.scala:215:38] wire final_meta_writeback_clients; // @[MSHR.scala:215:38] wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38] wire final_meta_writeback_dirty; // @[MSHR.scala:215:38] wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_0_0 = io_allocate_bits_prio_0; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7] wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7] wire [8:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7] wire [10:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7] wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7] wire 
io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7] wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7] wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7] wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7] wire [8:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7] wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7] wire [3:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7] wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7] wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7] wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7] wire [10:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7] wire [8:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7] wire [5:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7] wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7] wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7] wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7] wire [3:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7] wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7] wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7] wire [3:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7] wire [10:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7] wire [8:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7] wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7] wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7] wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7] wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_a_bits_source = 4'h0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_c_bits_source = 4'h0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_d_bits_sink = 4'h0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7] wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68] wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80] wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21] wire invalid_clients = 1'h0; // @[MSHR.scala:268:21] wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137] wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11] wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137] wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11] wire [8:0] invalid_tag = 9'h0; // @[MSHR.scala:268:21] wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21] wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70] wire allocate_as_full_prio_0 = io_allocate_bits_prio_0_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_opcode = 
io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34] wire [8:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34] wire [10:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34] wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40] wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93] wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28] wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39] wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105] wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55] wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91] wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41] wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41] wire [8:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41] wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51] wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64] wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41] wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41] wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57] wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41] wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43] wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40] wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66] wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41] wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41] wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41] wire [8:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41] wire no_wait; // @[MSHR.scala:183:83] wire [10:0] io_status_bits_set_0; // @[MSHR.scala:84:7] wire [8:0] io_status_bits_tag_0; // @[MSHR.scala:84:7] wire [3:0] io_status_bits_way_0; // @[MSHR.scala:84:7] wire io_status_bits_blockB_0; // @[MSHR.scala:84:7] wire io_status_bits_nestB_0; // @[MSHR.scala:84:7] wire io_status_bits_blockC_0; // @[MSHR.scala:84:7] wire io_status_bits_nestC_0; // @[MSHR.scala:84:7] wire io_status_valid_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7] wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7] wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7] wire [3:0] 
io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_0_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7] wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7] wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7] wire io_schedule_valid_0; // @[MSHR.scala:84:7] reg request_valid; // @[MSHR.scala:97:30] assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30] reg request_prio_0; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_0_0 = request_prio_0; // @[MSHR.scala:84:7, :98:20] reg request_prio_1; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20] reg request_prio_2; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20] reg request_control; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_opcode; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_param; // @[MSHR.scala:98:20] reg [2:0] request_size; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_source; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20] reg [8:0] request_tag; // @[MSHR.scala:98:20] assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_offset; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_put; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20] reg [10:0] request_set; // @[MSHR.scala:98:20] 
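// The request_* registers above latch the allocated request: they are loaded from
// io_allocate_bits when io_allocate_valid fires (request := io.allocate.bits in
// MSHR.scala) and fan out to the io_status_bits_* and io_schedule_bits_d_bits_*
// outputs assigned around here.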
assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] reg meta_valid; // @[MSHR.scala:99:27] reg meta_dirty; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17] reg [1:0] meta_state; // @[MSHR.scala:100:17] reg meta_clients; // @[MSHR.scala:100:17] wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39] wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27] wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27] reg [8:0] meta_tag; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17] reg meta_hit; // @[MSHR.scala:100:17] reg [3:0] meta_way; // @[MSHR.scala:100:17] assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] wire [3:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38] reg s_rprobe; // @[MSHR.scala:121:33] reg w_rprobeackfirst; // @[MSHR.scala:122:33] reg w_rprobeacklast; // @[MSHR.scala:123:33] reg s_release; // @[MSHR.scala:124:33] reg w_releaseack; // @[MSHR.scala:125:33] reg s_pprobe; // @[MSHR.scala:126:33] reg s_acquire; // @[MSHR.scala:127:33] reg s_flush; // @[MSHR.scala:128:33] reg w_grantfirst; // @[MSHR.scala:129:33] reg w_grantlast; // @[MSHR.scala:130:33] reg w_grant; // @[MSHR.scala:131:33] reg w_pprobeackfirst; // @[MSHR.scala:132:33] reg w_pprobeacklast; // @[MSHR.scala:133:33] reg w_pprobeack; // @[MSHR.scala:134:33] reg s_grantack; // @[MSHR.scala:136:33] reg s_execute; // @[MSHR.scala:137:33] reg w_grantack; // @[MSHR.scala:138:33] reg s_writeback; // @[MSHR.scala:139:33] reg [2:0] sink; // @[MSHR.scala:147:17] assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17] reg gotT; // @[MSHR.scala:148:17] reg bad_grant; // @[MSHR.scala:149:22] assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22] reg probes_done; // @[MSHR.scala:150:24] reg probes_toN; // @[MSHR.scala:151:23] reg probes_noT; // @[MSHR.scala:152:23] wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28] wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45] wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62] wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}] wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82] wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}] wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103] wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}] assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // 
@[MSHR.scala:168:{28,40,100}] assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40] wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39] wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}] wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}] wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96] assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}] assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93] assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28] assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28] wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43] wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64] wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}] wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85] wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}] assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}] assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39] wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33] wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}] wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}] assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}] assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83] wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31] wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}] assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}] assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55] wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31] wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44] assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}] assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41] wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32] wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}] assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}] assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64] wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31] wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}] assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // 
@[MSHR.scala:131:33, :187:{42,57}] assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57] wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31] assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}] assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43] wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31] assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}] assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40] wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34] wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}] wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70] wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}] assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}] assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66] wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49] wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}] wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}] wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49] wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}] assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}] assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105] wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71] wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71] wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71] wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire [8:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71] wire final_meta_writeback_hit; // @[MSHR.scala:215:38] wire req_clientBit = request_source == 6'h28; // @[Parameters.scala:46:9] wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12] wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12] wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _req_needT_T_2; // @[Parameters.scala:270:13] assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13] wire _excluded_client_T_6; // @[Parameters.scala:279:117] assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117] wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42] wire 
_req_needT_T_3; // @[Parameters.scala:270:42] assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42] wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11] assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11] wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42] wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _req_needT_T_6; // @[Parameters.scala:271:14] assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14] wire _req_acquire_T; // @[MSHR.scala:219:36] assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14] wire _excluded_client_T_1; // @[Parameters.scala:279:12] assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12] wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52] wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89] wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52] wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}] wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}] wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81] wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}] wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}] wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}] wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65] wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}] wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55] wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78] wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78] assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78] wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70] wire _evict_T_2; // @[MSHR.scala:317:26] assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _before_T_1; // @[MSHR.scala:317:26] assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}] wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 
2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}] wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43] assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43] wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}] wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75] wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}] wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:46:9] wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}] wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}] wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54] wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}] wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45] wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}] wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}] wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40] wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40] assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40] wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65] assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65] wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41] wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}] wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72] wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}] wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70] wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70] wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53] assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53] wire _evict_T_1; // @[MSHR.scala:317:26] assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire _before_T; // @[MSHR.scala:317:26] assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70] wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70] wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55] wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? 
_final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70] wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70] wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66] wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}] wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}] wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:46:9] wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40] assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30] wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54] wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}] assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21] assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21] assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36] assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? 
_final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36] wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:46:9] wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}] wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}] wire _excluded_client_T = meta_hit & request_prio_0; // @[MSHR.scala:98:20, :100:17, :279:38] wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50] wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}] wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}] wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}] wire _excluded_client_T_9 = _excluded_client_T & _excluded_client_T_8; // @[Parameters.scala:279:106] wire excluded_client = _excluded_client_T_9 & req_clientBit; // @[Parameters.scala:46:9] wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56] wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? _io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70] assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}] wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51] wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55] wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52] wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}] wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}] assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38] assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91] wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42] wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70] wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}] assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}] assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41] wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42] assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? 
meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}] assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41] wire _io_schedule_bits_b_bits_clients_T = ~excluded_client; // @[MSHR.scala:279:28, :289:53] assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients & _io_schedule_bits_b_bits_clients_T; // @[MSHR.scala:100:17, :289:{51,53}] assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51] assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41] assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41] assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}] assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41] wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42] wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53] wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53] wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89] wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53] wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53] wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79] assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41] wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42] assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 
9'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}] assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41] wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32] wire [3:0] evict; // @[MSHR.scala:314:26] wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32] wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32] wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32] assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32] assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39] wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39] assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39] assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76] wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76] assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76] assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32] assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] before_0; // @[MSHR.scala:314:26] wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32] wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _before_out_T_4 = before_c ? _before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11] assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? 
{1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] after; // @[MSHR.scala:314:26] wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26] wire _after_T; // @[MSHR.scala:317:26] assign _after_T = _GEN_9; // @[MSHR.scala:317:26] wire _prior_T; // @[MSHR.scala:317:26] assign _prior_T = _GEN_9; // @[MSHR.scala:317:26] wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32] wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26] wire _after_T_1; // @[MSHR.scala:317:26] assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire _prior_T_1; // @[MSHR.scala:317:26] assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32] wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32] assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32] assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39] wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39] assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39] assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76] wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76] assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76] assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26] wire _after_T_3; // @[MSHR.scala:317:26] assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26] wire _prior_T_3; // @[MSHR.scala:317:26] assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26] assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? 
{1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire probe_bit = io_sinkc_bits_source_0 == 6'h28; // @[Parameters.scala:46:9] wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:46:9] wire _last_probe_T; // @[MSHR.scala:459:33] assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33] wire _probes_done_T; // @[MSHR.scala:467:32] assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32] wire _last_probe_T_1 = ~excluded_client; // @[MSHR.scala:279:28, :289:53, :459:66] wire _last_probe_T_2 = meta_clients & _last_probe_T_1; // @[MSHR.scala:100:17, :459:{64,66}] wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}] wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11] wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43] wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}] wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75] wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}] wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:46:9] wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}] wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53] wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}] wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42] wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55] wire _w_rprobeacklast_T; // @[MSHR.scala:471:55] assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55] wire _w_pprobeacklast_T; // @[MSHR.scala:473:55] assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55] wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}] wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42] wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}] wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77] wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}] wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}] wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32] wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33] wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}] wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35] wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40] wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [8:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_hit = _new_meta_T ? 
final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [3:0] new_meta_way = _new_meta_T ? final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_request_prio_0 = io_allocate_valid_0 ? allocate_as_full_prio_0 : request_prio_0; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [8:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [10:0] new_request_set = io_allocate_valid_0 ? 
allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12] wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _new_needT_T_2; // @[Parameters.scala:270:13] assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13] wire _new_skipProbe_T_5; // @[Parameters.scala:279:117] assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117] wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42] wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _new_needT_T_6; // @[Parameters.scala:271:14] assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14] wire _new_skipProbe_T; // @[Parameters.scala:279:12] assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12] wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52] wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89] wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire new_clientBit = new_request_source == 6'h28; // @[Parameters.scala:46:9] wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50] wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}] wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}] wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}] wire new_skipProbe = _new_skipProbe_T_7 & new_clientBit; // @[Parameters.scala:46:9] wire [3:0] prior; // @[MSHR.scala:314:26] wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32] wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28]
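For reference, the chained ternaries driving evict, before_0, after, and prior in the Verilog above are the lowered form of a priority mux over the 2-bit directory state plus the dirty/clients flags. The sketch below is illustrative only: StateCode and its argument names are invented here and are not the actual MSHR.scala helper, but a when/elsewhen chain of this shape elaborates to exactly the nested ?: structure seen above.

import chisel3._
import chisel3.util._

// Invented helper (not from MSHR.scala): summarize a 2-bit coherence state plus
// dirty/clients flags into a 4-bit code, with 8 reserved for "no valid line".
object StateCode {
  def apply(state: UInt, dirty: Bool, clients: Bool, hit: Bool): UInt = {
    val code = WireDefault(0.U(4.W))
    when (!hit) {
      code := 8.U                                     // miss encodes as 4'h8
    } .elsewhen (state === 1.U) {                     // branch
      code := Mux(clients, 0.U, 1.U)
    } .elsewhen (state === 2.U) {                     // trunk
      code := Cat(1.U(1.W), !dirty)                   // -> 2 or 3
    } .elsewhen (state === 3.U) {                     // tip
      code := Mux(clients, Cat(2.U(2.W), !dirty),     // -> 4..7
                           Cat(3.U(2.W), !dirty))
    } .otherwise {                                    // invalid
      code := 8.U
    }
    code
  }
}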
Generate the Verilog code corresponding to the following Chisel files. File WidthWidget.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.AddressSet import freechips.rocketchip.util.{Repeater, UIntToOH1} // innBeatBytes => the new client-facing bus width class TLWidthWidget(innerBeatBytes: Int)(implicit p: Parameters) extends LazyModule { private def noChangeRequired(manager: TLManagerPortParameters) = manager.beatBytes == innerBeatBytes val node = new TLAdapterNode( clientFn = { case c => c }, managerFn = { case m => m.v1copy(beatBytes = innerBeatBytes) }){ override def circuitIdentity = edges.out.map(_.manager).forall(noChangeRequired) } override lazy val desiredName = s"TLWidthWidget$innerBeatBytes" lazy val module = new Impl class Impl extends LazyModuleImp(this) { def merge[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T]) = { val inBytes = edgeIn.manager.beatBytes val outBytes = edgeOut.manager.beatBytes val ratio = outBytes / inBytes val keepBits = log2Ceil(outBytes) val dropBits = log2Ceil(inBytes) val countBits = log2Ceil(ratio) val size = edgeIn.size(in.bits) val hasData = edgeIn.hasData(in.bits) val limit = UIntToOH1(size, keepBits) >> dropBits val count = RegInit(0.U(countBits.W)) val first = count === 0.U val last = count === limit || !hasData val enable = Seq.tabulate(ratio) { i => !((count ^ i.U) & limit).orR } val corrupt_reg = RegInit(false.B) val corrupt_in = edgeIn.corrupt(in.bits) val corrupt_out = corrupt_in || corrupt_reg when (in.fire) { count := count + 1.U corrupt_reg := corrupt_out when (last) { count := 0.U corrupt_reg := false.B } } def helper(idata: UInt): UInt = { // rdata is X until the first time a multi-beat write occurs. // Prevent the X from leaking outside by jamming the mux control until // the first time rdata is written (and hence no longer X). 
val rdata_written_once = RegInit(false.B) val masked_enable = enable.map(_ || !rdata_written_once) val odata = Seq.fill(ratio) { WireInit(idata) } val rdata = Reg(Vec(ratio-1, chiselTypeOf(idata))) val pdata = rdata :+ idata val mdata = (masked_enable zip (odata zip pdata)) map { case (e, (o, p)) => Mux(e, o, p) } when (in.fire && !last) { rdata_written_once := true.B (rdata zip mdata) foreach { case (r, m) => r := m } } Cat(mdata.reverse) } in.ready := out.ready || !last out.valid := in.valid && last out.bits := in.bits // Don't put down hardware if we never carry data edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits))) edgeOut.corrupt(out.bits) := corrupt_out (out.bits, in.bits) match { case (o: TLBundleA, i: TLBundleA) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W)) case (o: TLBundleB, i: TLBundleB) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W)) case (o: TLBundleC, i: TLBundleC) => () case (o: TLBundleD, i: TLBundleD) => () case _ => require(false, "Impossible bundle combination in WidthWidget") } } def split[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = { val inBytes = edgeIn.manager.beatBytes val outBytes = edgeOut.manager.beatBytes val ratio = inBytes / outBytes val keepBits = log2Ceil(inBytes) val dropBits = log2Ceil(outBytes) val countBits = log2Ceil(ratio) val size = edgeIn.size(in.bits) val hasData = edgeIn.hasData(in.bits) val limit = UIntToOH1(size, keepBits) >> dropBits val count = RegInit(0.U(countBits.W)) val first = count === 0.U val last = count === limit || !hasData when (out.fire) { count := count + 1.U when (last) { count := 0.U } } // For sub-beat transfer, extract which part matters val sel = in.bits match { case a: TLBundleA => a.address(keepBits-1, dropBits) case b: TLBundleB => b.address(keepBits-1, dropBits) case c: TLBundleC => c.address(keepBits-1, dropBits) case d: TLBundleD => { val sel = sourceMap(d.source) val hold = Mux(first, sel, RegEnable(sel, first)) // a_first is not for whole xfer hold & ~limit // if more than one a_first/xfer, the address must be aligned anyway } } val index = sel | count def helper(idata: UInt, width: Int): UInt = { val mux = VecInit.tabulate(ratio) { i => idata((i+1)*outBytes*width-1, i*outBytes*width) } mux(index) } out.bits := in.bits out.valid := in.valid in.ready := out.ready // Don't put down hardware if we never carry data edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits), 8)) (out.bits, in.bits) match { case (o: TLBundleA, i: TLBundleA) => o.mask := helper(i.mask, 1) case (o: TLBundleB, i: TLBundleB) => o.mask := helper(i.mask, 1) case (o: TLBundleC, i: TLBundleC) => () // replicating corrupt to all beats is ok case (o: TLBundleD, i: TLBundleD) => () case _ => require(false, "Impossbile bundle combination in WidthWidget") } // Repeat the input if we're not last !last } def splice[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = { if (edgeIn.manager.beatBytes == edgeOut.manager.beatBytes) { // nothing to do; pass it through out.bits := in.bits out.valid := in.valid in.ready := out.ready } else if (edgeIn.manager.beatBytes > edgeOut.manager.beatBytes) { // split input to output val repeat = Wire(Bool()) val repeated = Repeater(in, repeat) val cated = 
Wire(chiselTypeOf(repeated)) cated <> repeated edgeIn.data(cated.bits) := Cat( edgeIn.data(repeated.bits)(edgeIn.manager.beatBytes*8-1, edgeOut.manager.beatBytes*8), edgeIn.data(in.bits)(edgeOut.manager.beatBytes*8-1, 0)) repeat := split(edgeIn, cated, edgeOut, out, sourceMap) } else { // merge input to output merge(edgeIn, in, edgeOut, out) } } (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => // If the master is narrower than the slave, the D channel must be narrowed. // This is tricky, because the D channel has no address data. // Thus, you don't know which part of a sub-beat transfer to extract. // To fix this, we record the relevant address bits for all sources. // The assumption is that this sort of situation happens only where // you connect a narrow master to the system bus, so there are few sources. def sourceMap(source_bits: UInt) = { val source = if (edgeIn.client.endSourceId == 1) 0.U(0.W) else source_bits require (edgeOut.manager.beatBytes > edgeIn.manager.beatBytes) val keepBits = log2Ceil(edgeOut.manager.beatBytes) val dropBits = log2Ceil(edgeIn.manager.beatBytes) val sources = Reg(Vec(edgeIn.client.endSourceId, UInt((keepBits-dropBits).W))) val a_sel = in.a.bits.address(keepBits-1, dropBits) when (in.a.fire) { if (edgeIn.client.endSourceId == 1) { // avoid extraction-index-width warning sources(0) := a_sel } else { sources(in.a.bits.source) := a_sel } } // depopulate unused source registers: edgeIn.client.unusedSources.foreach { id => sources(id) := 0.U } val bypass = in.a.valid && in.a.bits.source === source if (edgeIn.manager.minLatency > 0) sources(source) else Mux(bypass, a_sel, sources(source)) } splice(edgeIn, in.a, edgeOut, out.a, sourceMap) splice(edgeOut, out.d, edgeIn, in.d, sourceMap) if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) { splice(edgeOut, out.b, edgeIn, in.b, sourceMap) splice(edgeIn, in.c, edgeOut, out.c, sourceMap) out.e.valid := in.e.valid out.e.bits := in.e.bits in.e.ready := out.e.ready } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLWidthWidget { def apply(innerBeatBytes: Int)(implicit p: Parameters): TLNode = { val widget = LazyModule(new TLWidthWidget(innerBeatBytes)) widget.node } def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper.beatBytes) } // Synthesizable unit tests import freechips.rocketchip.unittest._ class TLRAMWidthWidget(first: Int, second: Int, txns: Int)(implicit p: Parameters) extends LazyModule { val fuzz = LazyModule(new TLFuzzer(txns)) val model = LazyModule(new TLRAMModel("WidthWidget")) val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff))) (ram.node := TLDelayer(0.1) := TLFragmenter(4, 256) := TLWidthWidget(second) := TLWidthWidget(first) := TLDelayer(0.1) := model.node := fuzz.node) lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzz.module.io.finished } } class TLRAMWidthWidgetTest(little: Int, big: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut = Module(LazyModule(new TLRAMWidthWidget(little,big,txns)).module) dut.io.start := DontCare io.finished := dut.io.finished } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import 
org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. 
val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } }
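In the coupler that follows, TLWidthWidget1 must narrow the D channel from the 8-byte front bus back to the 1-byte debug port, so the sourceMap() table described in WidthWidget.scala above has to remember which byte lanes each outstanding source requested. Below is a standalone sketch of just that table; SourceSliceTable and its port names are invented for illustration and are not part of the sources above. For this coupler, keepBits = 3 and dropBits = 0.

import chisel3._
import chisel3.util._

// Illustrative only: the per-source address-slice table behind sourceMap().
// A-channel fires record address(keepBits-1, dropBits); D-channel beats look
// the slice back up by source id, since D carries no address of its own.
class SourceSliceTable(nSources: Int, keepBits: Int, dropBits: Int) extends Module {
  require(nSources > 1, "sketch assumes more than one source id")
  val io = IO(new Bundle {
    val a_fire   = Input(Bool())
    val a_source = Input(UInt(log2Ceil(nSources).W))
    val a_addr   = Input(UInt(32.W))
    val d_source = Input(UInt(log2Ceil(nSources).W))
    val d_sel    = Output(UInt((keepBits - dropBits).W))
  })
  val table = Reg(Vec(nSources, UInt((keepBits - dropBits).W)))
  when (io.a_fire) { table(io.a_source) := io.a_addr(keepBits - 1, dropBits) }
  io.d_sel := table(io.d_source)
}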
module TLInterconnectCoupler_fbus_from_debug_sb( // @[LazyModuleImp.scala:138:7] input clock, // @[LazyModuleImp.scala:138:7] input reset, // @[LazyModuleImp.scala:138:7] output auto_widget_anon_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_widget_anon_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_widget_anon_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [3:0] auto_widget_anon_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [31:0] auto_widget_anon_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_widget_anon_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_widget_anon_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_widget_anon_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_widget_anon_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_widget_anon_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_widget_anon_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [3:0] auto_widget_anon_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_widget_anon_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [7:0] auto_widget_anon_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_widget_anon_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_tl_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_tl_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [3:0] auto_tl_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [31:0] auto_tl_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_tl_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_tl_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_tl_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_tl_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_tl_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_tl_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [3:0] auto_tl_out_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_tl_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_tl_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_tl_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25] ); TLWidthWidget1 widget ( // @[WidthWidget.scala:230:28] .clock (clock), .reset (reset), .auto_anon_in_a_ready (auto_widget_anon_in_a_ready), .auto_anon_in_a_valid (auto_widget_anon_in_a_valid), .auto_anon_in_a_bits_opcode (auto_widget_anon_in_a_bits_opcode), .auto_anon_in_a_bits_size (auto_widget_anon_in_a_bits_size), .auto_anon_in_a_bits_address (auto_widget_anon_in_a_bits_address), .auto_anon_in_a_bits_data (auto_widget_anon_in_a_bits_data), .auto_anon_in_d_ready (auto_widget_anon_in_d_ready), .auto_anon_in_d_valid (auto_widget_anon_in_d_valid), .auto_anon_in_d_bits_opcode (auto_widget_anon_in_d_bits_opcode), .auto_anon_in_d_bits_param (auto_widget_anon_in_d_bits_param), .auto_anon_in_d_bits_size (auto_widget_anon_in_d_bits_size), .auto_anon_in_d_bits_sink (auto_widget_anon_in_d_bits_sink), .auto_anon_in_d_bits_denied (auto_widget_anon_in_d_bits_denied), .auto_anon_in_d_bits_data (auto_widget_anon_in_d_bits_data), .auto_anon_in_d_bits_corrupt (auto_widget_anon_in_d_bits_corrupt), .auto_anon_out_a_ready (auto_tl_out_a_ready), .auto_anon_out_a_valid (auto_tl_out_a_valid), .auto_anon_out_a_bits_opcode (auto_tl_out_a_bits_opcode), 
.auto_anon_out_a_bits_size (auto_tl_out_a_bits_size), .auto_anon_out_a_bits_address (auto_tl_out_a_bits_address), .auto_anon_out_a_bits_mask (auto_tl_out_a_bits_mask), .auto_anon_out_a_bits_data (auto_tl_out_a_bits_data), .auto_anon_out_d_ready (auto_tl_out_d_ready), .auto_anon_out_d_valid (auto_tl_out_d_valid), .auto_anon_out_d_bits_opcode (auto_tl_out_d_bits_opcode), .auto_anon_out_d_bits_param (auto_tl_out_d_bits_param), .auto_anon_out_d_bits_size (auto_tl_out_d_bits_size), .auto_anon_out_d_bits_sink (auto_tl_out_d_bits_sink), .auto_anon_out_d_bits_denied (auto_tl_out_d_bits_denied), .auto_anon_out_d_bits_data (auto_tl_out_d_bits_data), .auto_anon_out_d_bits_corrupt (auto_tl_out_d_bits_corrupt) ); // @[WidthWidget.scala:230:28] endmodule
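Inside TLWidthWidget1 above, the A channel takes the merge() path with inBytes = 1 and outBytes = 8, so ratio = 8, keepBits = 3 and dropBits = 0. The module below is a minimal sketch (MergeCountSketch is an invented name) of just the beat-count and lane-enable arithmetic from merge(), showing how limit and count decide which byte of the wide beat each narrow beat fills.

import chisel3._
import freechips.rocketchip.util.UIntToOH1

// Illustrative only: the count/limit/enable arithmetic of TLWidthWidget.merge
// specialized to inBytes = 1, outBytes = 8 (the 8-bit-to-64-bit case above).
class MergeCountSketch extends Module {
  val io = IO(new Bundle {
    val size    = Input(UInt(4.W))      // lgSize of the whole transfer
    val fire    = Input(Bool())         // a narrow beat is accepted
    val hasData = Input(Bool())
    val last    = Output(Bool())
    val enable  = Output(Vec(8, Bool()))
  })
  val keepBits = 3                      // log2Ceil(outBytes = 8)
  val dropBits = 0                      // log2Ceil(inBytes = 1)
  val limit = UIntToOH1(io.size, keepBits) >> dropBits
  val count = RegInit(0.U(3.W))
  io.last   := count === limit || !io.hasData
  io.enable := VecInit(Seq.tabulate(8) { i => !((count ^ i.U) & limit).orR })
  when (io.fire) {
    count := count + 1.U
    when (io.last) { count := 0.U }
  }
}

For a full 8-byte transfer, limit = 7 and exactly one lane matches count on each beat; for a single-beat access, limit = 0 and every lane passes the (replicated) narrow input straight through, leaving the byte mask to select the valid lanes.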
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
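The Verilog module that follows is what a call like the one sketched below elaborates to (w = 1, sync = 3, init = 0). IntSync and its port names are invented for illustration; the only API used is the AsyncResetSynchronizerShiftReg companion object defined above.

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Hypothetical usage: bring an asynchronous level into the local clock domain
// through a 3-deep synchronizer whose flops reset asynchronously to 0.
class IntSync extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  // sync = 3, init = 0, suggested instance name "int_sync"
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, 3, 0, Some("int_sync"))
}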
module AsyncResetSynchronizerShiftReg_w1_d3_i0_49( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] output io_q // @[ShiftReg.scala:36:14] ); wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire io_d = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41] wire _output_T_1 = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_65 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
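The safety of the AsyncQueue pointer crossing above rests on the read and write indices being Gray-coded: at most one bit changes per enqueue or dequeue, so the far-side synchronizers can never capture a torn index. Below is a minimal restatement of the conversion used by GrayCounter; Bin2Gray is an invented wrapper name.

import chisel3._

// Illustrative only: binary-to-Gray conversion, as in GrayCounter's
// `incremented ^ (incremented >> 1)`. E.g. for 3 bits, 3 (011) -> 010 and
// 4 (100) -> 110, which differ in exactly one bit across the 3 -> 4 step.
class Bin2Gray(bits: Int) extends Module {
  val io = IO(new Bundle {
    val bin  = Input(UInt(bits.W))
    val gray = Output(UInt(bits.W))
  })
  io.gray := io.bin ^ (io.bin >> 1)
}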
module AsyncValidSync_40( // @[AsyncQueue.scala:58:7] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in = 1'h1; // @[ShiftReg.scala:45:23] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_51 io_out_source_valid_0 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
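AsyncValidSync_40 above is one leg of the reset-safety handshake: AsyncQueueSource and AsyncQueueSink each chain two of these modules to extend a "this side is alive" level into the other domain. The sketch below (ValidHandshakeSketch is an invented name) is modeled on the source-side wiring in AsyncQueueSource, with the clock and reset of both stages driven explicitly because AsyncValidSync is a RawModule.

import chisel3._
import freechips.rocketchip.util.AsyncValidSync

// Hypothetical wiring, modeled on AsyncQueueSource: extend a "source alive"
// level through two 3-deep synchronizer stages before it leaves the domain.
class ValidHandshakeSketch extends Module {
  val io = IO(new Bundle { val widx_valid = Output(Bool()) })
  val v0 = Module(new AsyncValidSync(3, "source_valid_0"))
  val v1 = Module(new AsyncValidSync(3, "source_valid_1"))
  v0.clock := clock
  v1.clock := clock
  v0.reset := reset.asAsyncReset   // the real code also folds in the sink's reset
  v1.reset := reset.asAsyncReset
  v0.io.in := true.B
  v1.io.in := v0.io.out
  io.widx_valid := v1.io.out
}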
Generate the Verilog code corresponding to the following Chisel files. File LoadOrderBuffer.scala: package saturn.mem import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.tile._ import freechips.rocketchip.util._ import saturn.common._ object AgePriorityEncoder { def apply(in: Seq[Bool], head: UInt): UInt = { val n = in.size val width = log2Ceil(in.size) val n_padded = 1 << width val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in val idx = PriorityEncoder(temp_vec) idx(width-1, 0) //discard msb } } class LoadOrderBuffer(nEntries: Int, nRobEntries: Int)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams { require(nEntries > 0, "Queue must have non-negative number of entries") require(nRobEntries <= nEntries) val tagBits = log2Ceil(nEntries) val io = IO(new Bundle { val reserve = Decoupled(UInt(tagBits.W)) val entry = Input(new IFQEntry) val push = Input(Valid(new Bundle { val data = UInt(dLen.W) val tag = UInt(tagBits.W) })) val replay_liq_id = Output(UInt(log2Ceil(vParams.vliqEntries).W)) val replay = Decoupled(new MemRequest(dLenB, dmemTagBits)) val deq = Decoupled(new IFQEntry) val deq_data = Output(UInt(dLen.W)) val busy = Output(Bool()) }) val simpleRob = nEntries == nRobEntries val valids = RegInit(VecInit.fill(nEntries)(false.B)) val must_replay = RegInit(VecInit.fill(nEntries)(false.B)) val entries = Reg(Vec(nEntries, new IFQEntry)) val rob_idxs = Reg(Vec(nEntries, UInt(log2Ceil(nRobEntries).W))) val rob = Reg(Vec(nRobEntries, UInt(dLen.W))) val rob_valids = RegInit(VecInit.fill(nRobEntries)(false.B)) val enq_ptr = Counter(nEntries) val deq_ptr = Counter(nEntries) val maybe_full = RegInit(false.B) val has_replay = must_replay.orR val rob_full = rob_valids.andR && (!simpleRob).B val rob_next = PriorityEncoder(~rob_valids) val ptr_match = enq_ptr.value === deq_ptr.value val empty = ptr_match && !maybe_full val full = ptr_match && maybe_full io.busy := !empty io.reserve.valid := !full && !has_replay io.reserve.bits := enq_ptr.value when (io.reserve.fire) { entries(enq_ptr.value) := io.entry valids(enq_ptr.value) := io.entry.masked enq_ptr.inc() } io.deq.valid := !empty && (valids(deq_ptr.value) || (io.push.fire && io.push.bits.tag === deq_ptr.value)) io.deq.bits := entries(deq_ptr.value) val rob_deq_idx = if (simpleRob) deq_ptr.value else rob_idxs(deq_ptr.value) io.deq_data := Mux(valids(deq_ptr.value), rob(rob_deq_idx), io.push.bits.data) val rob_push_idx = if (simpleRob) io.push.bits.tag else rob_next when (io.push.valid && !(deq_ptr.value === io.push.bits.tag && io.deq.ready)) { when (rob_full) { must_replay(io.push.bits.tag) := true.B } .otherwise { valids(io.push.bits.tag) := true.B rob_idxs(io.push.bits.tag) := rob_next rob_valids(rob_push_idx) := true.B rob(rob_push_idx) := io.push.bits.data } } when (io.deq.fire) { deq_ptr.inc() valids(deq_ptr.value) := false.B when (valids(deq_ptr.value) && !entries(deq_ptr.value).masked) { rob_valids(rob_deq_idx) := false.B } } val replay_valid = must_replay.orR val next_replay = AgePriorityEncoder(must_replay, deq_ptr.value) io.replay_liq_id := entries(next_replay).lsiq_id io.replay.valid := replay_valid io.replay.bits.addr := entries(next_replay).page_offset io.replay.bits.data := DontCare io.replay.bits.mask := ~(0.U(dLenB.W)) io.replay.bits.tag := next_replay io.replay.bits.store := false.B when (io.replay.fire) { must_replay(next_replay) := false.B } when (io.reserve.fire =/= io.deq.fire) { maybe_full := 
io.reserve.fire } }
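The AgePriorityEncoder above is what drives replay selection (next_replay = AgePriorityEncoder(must_replay, deq_ptr.value)): it picks the lowest-indexed pending entry at or after the dequeue pointer, and only wraps around to the lowest-indexed pending entry when nothing at or after the pointer is set. The following is a minimal plain-Scala model of that selection policy, included only as a sketch; the object and method names are illustrative and are not part of the generated hardware.

object AgePriorityEncoderModel {
  // Same policy as AgePriorityEncoder: take the first request at or after
  // `head`, otherwise wrap around to the first request anywhere.
  def select(requests: Seq[Boolean], head: Int): Option[Int] = {
    val atOrAfterHead = requests.zipWithIndex.collectFirst {
      case (true, i) if i >= head => i
    }
    val anywhere = requests.indexWhere(identity) match {
      case -1 => None
      case i  => Some(i)
    }
    atOrAfterHead.orElse(anywhere)
  }

  def main(args: Array[String]): Unit = {
    // Entries 1 and 5 need replay, deq_ptr (head) = 3: entry 5 is older
    // relative to the head, so it is replayed first.
    println(select(Seq(false, true, false, false, false, true, false, false), 3)) // Some(5)
    // Only entry 1 needs replay, head = 6: nothing at or after the head, so wrap to 1.
    println(select(Seq(false, true, false, false, false, false, false, false), 6)) // Some(1)
  }
}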
module LoadOrderBuffer( // @[LoadOrderBuffer.scala:22:7] input clock, // @[LoadOrderBuffer.scala:22:7] input reset, // @[LoadOrderBuffer.scala:22:7] input io_reserve_ready, // @[LoadOrderBuffer.scala:27:14] output io_reserve_valid, // @[LoadOrderBuffer.scala:27:14] output [2:0] io_reserve_bits, // @[LoadOrderBuffer.scala:27:14] input [2:0] io_entry_head, // @[LoadOrderBuffer.scala:27:14] input [2:0] io_entry_tail, // @[LoadOrderBuffer.scala:27:14] input io_entry_masked, // @[LoadOrderBuffer.scala:27:14] input [1:0] io_entry_lsiq_id, // @[LoadOrderBuffer.scala:27:14] input [11:0] io_entry_page_offset, // @[LoadOrderBuffer.scala:27:14] input io_push_valid, // @[LoadOrderBuffer.scala:27:14] input [63:0] io_push_bits_data, // @[LoadOrderBuffer.scala:27:14] input [2:0] io_push_bits_tag, // @[LoadOrderBuffer.scala:27:14] output [1:0] io_replay_liq_id, // @[LoadOrderBuffer.scala:27:14] input io_replay_ready, // @[LoadOrderBuffer.scala:27:14] output io_replay_valid, // @[LoadOrderBuffer.scala:27:14] output [39:0] io_replay_bits_addr, // @[LoadOrderBuffer.scala:27:14] output [3:0] io_replay_bits_tag, // @[LoadOrderBuffer.scala:27:14] input io_deq_ready, // @[LoadOrderBuffer.scala:27:14] output io_deq_valid, // @[LoadOrderBuffer.scala:27:14] output [2:0] io_deq_bits_head, // @[LoadOrderBuffer.scala:27:14] output [2:0] io_deq_bits_tail, // @[LoadOrderBuffer.scala:27:14] output [63:0] io_deq_data // @[LoadOrderBuffer.scala:27:14] ); reg valids_0; // @[LoadOrderBuffer.scala:44:23] reg valids_1; // @[LoadOrderBuffer.scala:44:23] reg valids_2; // @[LoadOrderBuffer.scala:44:23] reg valids_3; // @[LoadOrderBuffer.scala:44:23] reg valids_4; // @[LoadOrderBuffer.scala:44:23] reg valids_5; // @[LoadOrderBuffer.scala:44:23] reg valids_6; // @[LoadOrderBuffer.scala:44:23] reg valids_7; // @[LoadOrderBuffer.scala:44:23] reg must_replay_0; // @[LoadOrderBuffer.scala:45:28] reg must_replay_1; // @[LoadOrderBuffer.scala:45:28] reg must_replay_2; // @[LoadOrderBuffer.scala:45:28] reg must_replay_3; // @[LoadOrderBuffer.scala:45:28] reg must_replay_4; // @[LoadOrderBuffer.scala:45:28] reg must_replay_5; // @[LoadOrderBuffer.scala:45:28] reg must_replay_6; // @[LoadOrderBuffer.scala:45:28] reg must_replay_7; // @[LoadOrderBuffer.scala:45:28] reg [2:0] entries_0_head; // @[LoadOrderBuffer.scala:46:20] reg [2:0] entries_0_tail; // @[LoadOrderBuffer.scala:46:20] reg entries_0_masked; // @[LoadOrderBuffer.scala:46:20] reg [1:0] entries_0_lsiq_id; // @[LoadOrderBuffer.scala:46:20] reg [11:0] entries_0_page_offset; // @[LoadOrderBuffer.scala:46:20] reg [2:0] entries_1_head; // @[LoadOrderBuffer.scala:46:20] reg [2:0] entries_1_tail; // @[LoadOrderBuffer.scala:46:20] reg entries_1_masked; // @[LoadOrderBuffer.scala:46:20] reg [1:0] entries_1_lsiq_id; // @[LoadOrderBuffer.scala:46:20] reg [11:0] entries_1_page_offset; // @[LoadOrderBuffer.scala:46:20] reg [2:0] entries_2_head; // @[LoadOrderBuffer.scala:46:20] reg [2:0] entries_2_tail; // @[LoadOrderBuffer.scala:46:20] reg entries_2_masked; // @[LoadOrderBuffer.scala:46:20] reg [1:0] entries_2_lsiq_id; // @[LoadOrderBuffer.scala:46:20] reg [11:0] entries_2_page_offset; // @[LoadOrderBuffer.scala:46:20] reg [2:0] entries_3_head; // @[LoadOrderBuffer.scala:46:20] reg [2:0] entries_3_tail; // @[LoadOrderBuffer.scala:46:20] reg entries_3_masked; // @[LoadOrderBuffer.scala:46:20] reg [1:0] entries_3_lsiq_id; // @[LoadOrderBuffer.scala:46:20] reg [11:0] entries_3_page_offset; // @[LoadOrderBuffer.scala:46:20] reg [2:0] entries_4_head; // @[LoadOrderBuffer.scala:46:20] reg [2:0] 
entries_4_tail; // @[LoadOrderBuffer.scala:46:20] reg entries_4_masked; // @[LoadOrderBuffer.scala:46:20] reg [1:0] entries_4_lsiq_id; // @[LoadOrderBuffer.scala:46:20] reg [11:0] entries_4_page_offset; // @[LoadOrderBuffer.scala:46:20] reg [2:0] entries_5_head; // @[LoadOrderBuffer.scala:46:20] reg [2:0] entries_5_tail; // @[LoadOrderBuffer.scala:46:20] reg entries_5_masked; // @[LoadOrderBuffer.scala:46:20] reg [1:0] entries_5_lsiq_id; // @[LoadOrderBuffer.scala:46:20] reg [11:0] entries_5_page_offset; // @[LoadOrderBuffer.scala:46:20] reg [2:0] entries_6_head; // @[LoadOrderBuffer.scala:46:20] reg [2:0] entries_6_tail; // @[LoadOrderBuffer.scala:46:20] reg entries_6_masked; // @[LoadOrderBuffer.scala:46:20] reg [1:0] entries_6_lsiq_id; // @[LoadOrderBuffer.scala:46:20] reg [11:0] entries_6_page_offset; // @[LoadOrderBuffer.scala:46:20] reg [2:0] entries_7_head; // @[LoadOrderBuffer.scala:46:20] reg [2:0] entries_7_tail; // @[LoadOrderBuffer.scala:46:20] reg entries_7_masked; // @[LoadOrderBuffer.scala:46:20] reg [1:0] entries_7_lsiq_id; // @[LoadOrderBuffer.scala:46:20] reg [11:0] entries_7_page_offset; // @[LoadOrderBuffer.scala:46:20] reg [1:0] rob_idxs_0; // @[LoadOrderBuffer.scala:47:21] reg [1:0] rob_idxs_1; // @[LoadOrderBuffer.scala:47:21] reg [1:0] rob_idxs_2; // @[LoadOrderBuffer.scala:47:21] reg [1:0] rob_idxs_3; // @[LoadOrderBuffer.scala:47:21] reg [1:0] rob_idxs_4; // @[LoadOrderBuffer.scala:47:21] reg [1:0] rob_idxs_5; // @[LoadOrderBuffer.scala:47:21] reg [1:0] rob_idxs_6; // @[LoadOrderBuffer.scala:47:21] reg [1:0] rob_idxs_7; // @[LoadOrderBuffer.scala:47:21] reg [63:0] rob_0; // @[LoadOrderBuffer.scala:48:16] reg [63:0] rob_1; // @[LoadOrderBuffer.scala:48:16] reg [63:0] rob_2; // @[LoadOrderBuffer.scala:48:16] reg [63:0] rob_3; // @[LoadOrderBuffer.scala:48:16] reg rob_valids_0; // @[LoadOrderBuffer.scala:49:27] reg rob_valids_1; // @[LoadOrderBuffer.scala:49:27] reg rob_valids_2; // @[LoadOrderBuffer.scala:49:27] reg rob_valids_3; // @[LoadOrderBuffer.scala:49:27] reg [2:0] enq_ptr_value; // @[Counter.scala:61:40] reg [2:0] deq_ptr_value; // @[Counter.scala:61:40] reg maybe_full; // @[LoadOrderBuffer.scala:53:27] wire _replay_valid_T = must_replay_0 | must_replay_1; // @[LoadOrderBuffer.scala:45:28] wire ptr_match = enq_ptr_value == deq_ptr_value; // @[Counter.scala:61:40] wire io_reserve_valid_0 = ~(ptr_match & maybe_full) & ~(_replay_valid_T | must_replay_2 | must_replay_3 | must_replay_4 | must_replay_5 | must_replay_6 | must_replay_7); // @[LoadOrderBuffer.scala:45:28, :53:27, :57:33, :59:24, :62:{23,29,32}] wire _io_deq_valid_T_1 = io_push_bits_tag == deq_ptr_value; // @[Counter.scala:61:40] wire [7:0] _GEN = {{valids_7}, {valids_6}, {valids_5}, {valids_4}, {valids_3}, {valids_2}, {valids_1}, {valids_0}}; // @[LoadOrderBuffer.scala:44:23, :70:52] wire _GEN_0 = _GEN[deq_ptr_value]; // @[Counter.scala:61:40] wire io_deq_valid_0 = ~(ptr_match & ~maybe_full) & (_GEN_0 | io_push_valid & _io_deq_valid_T_1); // @[LoadOrderBuffer.scala:53:27, :57:33, :58:{25,28}, :61:14, :70:{26,52,69,89}] wire [7:0][2:0] _GEN_1 = {{entries_7_head}, {entries_6_head}, {entries_5_head}, {entries_4_head}, {entries_3_head}, {entries_2_head}, {entries_1_head}, {entries_0_head}}; // @[LoadOrderBuffer.scala:46:20, :71:15] wire [7:0][2:0] _GEN_2 = {{entries_7_tail}, {entries_6_tail}, {entries_5_tail}, {entries_4_tail}, {entries_3_tail}, {entries_2_tail}, {entries_1_tail}, {entries_0_tail}}; // @[LoadOrderBuffer.scala:46:20, :71:15] wire [7:0][1:0] _GEN_3 = {{rob_idxs_7}, {rob_idxs_6}, 
{rob_idxs_5}, {rob_idxs_4}, {rob_idxs_3}, {rob_idxs_2}, {rob_idxs_1}, {rob_idxs_0}}; // @[LoadOrderBuffer.scala:47:21, :73:21] wire [1:0] _GEN_4 = _GEN_3[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][63:0] _GEN_5 = {{rob_3}, {rob_2}, {rob_1}, {rob_0}}; // @[LoadOrderBuffer.scala:48:16, :73:21] wire replay_valid = _replay_valid_T | must_replay_2 | must_replay_3 | must_replay_4 | must_replay_5 | must_replay_6 | must_replay_7; // @[LoadOrderBuffer.scala:45:28] wire [2:0] next_replay_idx = must_replay_0 & deq_ptr_value == 3'h0 ? 3'h0 : must_replay_1 & deq_ptr_value < 3'h2 ? 3'h1 : must_replay_2 & deq_ptr_value < 3'h3 ? 3'h2 : must_replay_3 & ~(deq_ptr_value[2]) ? 3'h3 : must_replay_4 & deq_ptr_value < 3'h5 ? 3'h4 : must_replay_5 & deq_ptr_value[2:1] != 2'h3 ? 3'h5 : must_replay_6 & deq_ptr_value != 3'h7 ? 3'h6 : must_replay_7 ? 3'h7 : must_replay_0 ? 3'h0 : must_replay_1 ? 3'h1 : must_replay_2 ? 3'h2 : must_replay_3 ? 3'h3 : must_replay_4 ? 3'h4 : must_replay_5 ? 3'h5 : {2'h3, ~must_replay_6}; // @[Mux.scala:50:70] wire [7:0][1:0] _GEN_6 = {{entries_7_lsiq_id}, {entries_6_lsiq_id}, {entries_5_lsiq_id}, {entries_4_lsiq_id}, {entries_3_lsiq_id}, {entries_2_lsiq_id}, {entries_1_lsiq_id}, {entries_0_lsiq_id}}; // @[LoadOrderBuffer.scala:46:20, :97:20] wire [7:0][11:0] _GEN_7 = {{entries_7_page_offset}, {entries_6_page_offset}, {entries_5_page_offset}, {entries_4_page_offset}, {entries_3_page_offset}, {entries_2_page_offset}, {entries_1_page_offset}, {entries_0_page_offset}}; // @[LoadOrderBuffer.scala:46:20, :97:20] wire _GEN_8 = io_deq_ready & io_deq_valid_0; // @[Decoupled.scala:51:35] wire [7:0] _GEN_9 = {{entries_7_masked}, {entries_6_masked}, {entries_5_masked}, {entries_4_masked}, {entries_3_masked}, {entries_2_masked}, {entries_1_masked}, {entries_0_masked}}; // @[LoadOrderBuffer.scala:46:20, :71:15] wire _GEN_10 = _GEN_0 & ~_GEN_9[deq_ptr_value]; // @[Counter.scala:61:40] wire _GEN_11 = io_replay_ready & replay_valid; // @[Decoupled.scala:51:35] wire rob_full = rob_valids_0 & rob_valids_1 & rob_valids_2 & rob_valids_3; // @[LoadOrderBuffer.scala:49:27] wire [1:0] rob_next = rob_valids_0 ? (rob_valids_1 ? 
{1'h1, rob_valids_2} : 2'h1) : 2'h0; // @[Mux.scala:50:70] wire _maybe_full_T = io_reserve_ready & io_reserve_valid_0; // @[Decoupled.scala:51:35] wire _GEN_12 = _maybe_full_T & enq_ptr_value == 3'h0; // @[Decoupled.scala:51:35] wire _GEN_13 = _maybe_full_T & enq_ptr_value == 3'h1; // @[Decoupled.scala:51:35] wire _GEN_14 = _maybe_full_T & enq_ptr_value == 3'h2; // @[Decoupled.scala:51:35] wire _GEN_15 = _maybe_full_T & enq_ptr_value == 3'h3; // @[Decoupled.scala:51:35] wire _GEN_16 = _maybe_full_T & enq_ptr_value == 3'h4; // @[Decoupled.scala:51:35] wire _GEN_17 = _maybe_full_T & enq_ptr_value == 3'h5; // @[Decoupled.scala:51:35] wire _GEN_18 = _maybe_full_T & enq_ptr_value == 3'h6; // @[Decoupled.scala:51:35] wire _GEN_19 = _maybe_full_T & (&enq_ptr_value); // @[Decoupled.scala:51:35] wire _GEN_20 = io_push_valid & ~(_io_deq_valid_T_1 & io_deq_ready); // @[LoadOrderBuffer.scala:70:89, :76:{23,26,63}] wire _GEN_21 = io_push_bits_tag == 3'h0; // @[LoadOrderBuffer.scala:78:37] wire _GEN_22 = io_push_bits_tag == 3'h1; // @[LoadOrderBuffer.scala:78:37] wire _GEN_23 = io_push_bits_tag == 3'h2; // @[LoadOrderBuffer.scala:78:37] wire _GEN_24 = io_push_bits_tag == 3'h3; // @[LoadOrderBuffer.scala:78:37] wire _GEN_25 = io_push_bits_tag == 3'h4; // @[LoadOrderBuffer.scala:78:37] wire _GEN_26 = io_push_bits_tag == 3'h5; // @[LoadOrderBuffer.scala:78:37] wire _GEN_27 = io_push_bits_tag == 3'h6; // @[LoadOrderBuffer.scala:78:37] wire _GEN_28 = rob_next == 2'h0; // @[Mux.scala:50:70] wire _GEN_29 = rob_next == 2'h1; // @[Mux.scala:50:70] wire _GEN_30 = rob_next == 2'h2; // @[Mux.scala:50:70] always @(posedge clock) begin // @[LoadOrderBuffer.scala:22:7] if (reset) begin // @[LoadOrderBuffer.scala:22:7] valids_0 <= 1'h0; // @[LoadOrderBuffer.scala:44:23] valids_1 <= 1'h0; // @[LoadOrderBuffer.scala:44:23] valids_2 <= 1'h0; // @[LoadOrderBuffer.scala:44:23] valids_3 <= 1'h0; // @[LoadOrderBuffer.scala:44:23] valids_4 <= 1'h0; // @[LoadOrderBuffer.scala:44:23] valids_5 <= 1'h0; // @[LoadOrderBuffer.scala:44:23] valids_6 <= 1'h0; // @[LoadOrderBuffer.scala:44:23] valids_7 <= 1'h0; // @[LoadOrderBuffer.scala:44:23] must_replay_0 <= 1'h0; // @[LoadOrderBuffer.scala:45:28] must_replay_1 <= 1'h0; // @[LoadOrderBuffer.scala:45:28] must_replay_2 <= 1'h0; // @[LoadOrderBuffer.scala:45:28] must_replay_3 <= 1'h0; // @[LoadOrderBuffer.scala:45:28] must_replay_4 <= 1'h0; // @[LoadOrderBuffer.scala:45:28] must_replay_5 <= 1'h0; // @[LoadOrderBuffer.scala:45:28] must_replay_6 <= 1'h0; // @[LoadOrderBuffer.scala:45:28] must_replay_7 <= 1'h0; // @[LoadOrderBuffer.scala:45:28] rob_valids_0 <= 1'h0; // @[LoadOrderBuffer.scala:49:27] rob_valids_1 <= 1'h0; // @[LoadOrderBuffer.scala:49:27] rob_valids_2 <= 1'h0; // @[LoadOrderBuffer.scala:49:27] rob_valids_3 <= 1'h0; // @[LoadOrderBuffer.scala:49:27] enq_ptr_value <= 3'h0; // @[Counter.scala:61:40] deq_ptr_value <= 3'h0; // @[Counter.scala:61:40] maybe_full <= 1'h0; // @[LoadOrderBuffer.scala:53:27] end else begin // @[LoadOrderBuffer.scala:22:7] valids_0 <= ~(_GEN_8 & deq_ptr_value == 3'h0) & (_GEN_20 & ~rob_full & _GEN_21 | (_GEN_12 ? io_entry_masked : valids_0)); // @[Decoupled.scala:51:35] valids_1 <= ~(_GEN_8 & deq_ptr_value == 3'h1) & (_GEN_20 & ~rob_full & _GEN_22 | (_GEN_13 ? io_entry_masked : valids_1)); // @[Decoupled.scala:51:35] valids_2 <= ~(_GEN_8 & deq_ptr_value == 3'h2) & (_GEN_20 & ~rob_full & _GEN_23 | (_GEN_14 ? 
io_entry_masked : valids_2)); // @[Decoupled.scala:51:35] valids_3 <= ~(_GEN_8 & deq_ptr_value == 3'h3) & (_GEN_20 & ~rob_full & _GEN_24 | (_GEN_15 ? io_entry_masked : valids_3)); // @[Decoupled.scala:51:35] valids_4 <= ~(_GEN_8 & deq_ptr_value == 3'h4) & (_GEN_20 & ~rob_full & _GEN_25 | (_GEN_16 ? io_entry_masked : valids_4)); // @[Decoupled.scala:51:35] valids_5 <= ~(_GEN_8 & deq_ptr_value == 3'h5) & (_GEN_20 & ~rob_full & _GEN_26 | (_GEN_17 ? io_entry_masked : valids_5)); // @[Decoupled.scala:51:35] valids_6 <= ~(_GEN_8 & deq_ptr_value == 3'h6) & (_GEN_20 & ~rob_full & _GEN_27 | (_GEN_18 ? io_entry_masked : valids_6)); // @[Decoupled.scala:51:35] valids_7 <= ~(_GEN_8 & (&deq_ptr_value)) & (_GEN_20 & ~rob_full & (&io_push_bits_tag) | (_GEN_19 ? io_entry_masked : valids_7)); // @[Decoupled.scala:51:35] must_replay_0 <= ~(_GEN_11 & next_replay_idx == 3'h0) & (_GEN_20 & rob_full & _GEN_21 | must_replay_0); // @[Mux.scala:50:70] must_replay_1 <= ~(_GEN_11 & next_replay_idx == 3'h1) & (_GEN_20 & rob_full & _GEN_22 | must_replay_1); // @[Mux.scala:50:70] must_replay_2 <= ~(_GEN_11 & next_replay_idx == 3'h2) & (_GEN_20 & rob_full & _GEN_23 | must_replay_2); // @[Mux.scala:50:70] must_replay_3 <= ~(_GEN_11 & next_replay_idx == 3'h3) & (_GEN_20 & rob_full & _GEN_24 | must_replay_3); // @[Mux.scala:50:70] must_replay_4 <= ~(_GEN_11 & next_replay_idx == 3'h4) & (_GEN_20 & rob_full & _GEN_25 | must_replay_4); // @[Mux.scala:50:70] must_replay_5 <= ~(_GEN_11 & next_replay_idx == 3'h5) & (_GEN_20 & rob_full & _GEN_26 | must_replay_5); // @[Mux.scala:50:70] must_replay_6 <= ~(_GEN_11 & next_replay_idx == 3'h6) & (_GEN_20 & rob_full & _GEN_27 | must_replay_6); // @[Mux.scala:50:70] must_replay_7 <= ~(_GEN_11 & (&next_replay_idx)) & (_GEN_20 & rob_full & (&io_push_bits_tag) | must_replay_7); // @[Mux.scala:50:70] rob_valids_0 <= ~(_GEN_8 & _GEN_10 & _GEN_4 == 2'h0) & (_GEN_20 & ~rob_full & _GEN_28 | rob_valids_0); // @[Decoupled.scala:51:35] rob_valids_1 <= ~(_GEN_8 & _GEN_10 & _GEN_4 == 2'h1) & (_GEN_20 & ~rob_full & _GEN_29 | rob_valids_1); // @[Decoupled.scala:51:35] rob_valids_2 <= ~(_GEN_8 & _GEN_10 & _GEN_4 == 2'h2) & (_GEN_20 & ~rob_full & _GEN_30 | rob_valids_2); // @[Decoupled.scala:51:35] rob_valids_3 <= ~(_GEN_8 & _GEN_10 & (&_GEN_4)) & (_GEN_20 & ~rob_full & (&rob_next) | rob_valids_3); // @[Mux.scala:50:70] if (_maybe_full_T) // @[Decoupled.scala:51:35] enq_ptr_value <= enq_ptr_value + 3'h1; // @[Counter.scala:61:40, :77:24] if (_GEN_8) // @[Decoupled.scala:51:35] deq_ptr_value <= deq_ptr_value + 3'h1; // @[Counter.scala:61:40, :77:24] if (~(_maybe_full_T == _GEN_8)) // @[Decoupled.scala:51:35] maybe_full <= _maybe_full_T; // @[Decoupled.scala:51:35] end if (_GEN_12) begin // @[LoadOrderBuffer.scala:46:20, :64:26, :65:28] entries_0_head <= io_entry_head; // @[LoadOrderBuffer.scala:46:20] entries_0_tail <= io_entry_tail; // @[LoadOrderBuffer.scala:46:20] entries_0_masked <= io_entry_masked; // @[LoadOrderBuffer.scala:46:20] entries_0_lsiq_id <= io_entry_lsiq_id; // @[LoadOrderBuffer.scala:46:20] entries_0_page_offset <= io_entry_page_offset; // @[LoadOrderBuffer.scala:46:20] end if (_GEN_13) begin // @[LoadOrderBuffer.scala:46:20, :64:26, :65:28] entries_1_head <= io_entry_head; // @[LoadOrderBuffer.scala:46:20] entries_1_tail <= io_entry_tail; // @[LoadOrderBuffer.scala:46:20] entries_1_masked <= io_entry_masked; // @[LoadOrderBuffer.scala:46:20] entries_1_lsiq_id <= io_entry_lsiq_id; // @[LoadOrderBuffer.scala:46:20] entries_1_page_offset <= io_entry_page_offset; // 
@[LoadOrderBuffer.scala:46:20] end if (_GEN_14) begin // @[LoadOrderBuffer.scala:46:20, :64:26, :65:28] entries_2_head <= io_entry_head; // @[LoadOrderBuffer.scala:46:20] entries_2_tail <= io_entry_tail; // @[LoadOrderBuffer.scala:46:20] entries_2_masked <= io_entry_masked; // @[LoadOrderBuffer.scala:46:20] entries_2_lsiq_id <= io_entry_lsiq_id; // @[LoadOrderBuffer.scala:46:20] entries_2_page_offset <= io_entry_page_offset; // @[LoadOrderBuffer.scala:46:20] end if (_GEN_15) begin // @[LoadOrderBuffer.scala:46:20, :64:26, :65:28] entries_3_head <= io_entry_head; // @[LoadOrderBuffer.scala:46:20] entries_3_tail <= io_entry_tail; // @[LoadOrderBuffer.scala:46:20] entries_3_masked <= io_entry_masked; // @[LoadOrderBuffer.scala:46:20] entries_3_lsiq_id <= io_entry_lsiq_id; // @[LoadOrderBuffer.scala:46:20] entries_3_page_offset <= io_entry_page_offset; // @[LoadOrderBuffer.scala:46:20] end if (_GEN_16) begin // @[LoadOrderBuffer.scala:46:20, :64:26, :65:28] entries_4_head <= io_entry_head; // @[LoadOrderBuffer.scala:46:20] entries_4_tail <= io_entry_tail; // @[LoadOrderBuffer.scala:46:20] entries_4_masked <= io_entry_masked; // @[LoadOrderBuffer.scala:46:20] entries_4_lsiq_id <= io_entry_lsiq_id; // @[LoadOrderBuffer.scala:46:20] entries_4_page_offset <= io_entry_page_offset; // @[LoadOrderBuffer.scala:46:20] end if (_GEN_17) begin // @[LoadOrderBuffer.scala:46:20, :64:26, :65:28] entries_5_head <= io_entry_head; // @[LoadOrderBuffer.scala:46:20] entries_5_tail <= io_entry_tail; // @[LoadOrderBuffer.scala:46:20] entries_5_masked <= io_entry_masked; // @[LoadOrderBuffer.scala:46:20] entries_5_lsiq_id <= io_entry_lsiq_id; // @[LoadOrderBuffer.scala:46:20] entries_5_page_offset <= io_entry_page_offset; // @[LoadOrderBuffer.scala:46:20] end if (_GEN_18) begin // @[LoadOrderBuffer.scala:46:20, :64:26, :65:28] entries_6_head <= io_entry_head; // @[LoadOrderBuffer.scala:46:20] entries_6_tail <= io_entry_tail; // @[LoadOrderBuffer.scala:46:20] entries_6_masked <= io_entry_masked; // @[LoadOrderBuffer.scala:46:20] entries_6_lsiq_id <= io_entry_lsiq_id; // @[LoadOrderBuffer.scala:46:20] entries_6_page_offset <= io_entry_page_offset; // @[LoadOrderBuffer.scala:46:20] end if (_GEN_19) begin // @[LoadOrderBuffer.scala:46:20, :64:26, :65:28] entries_7_head <= io_entry_head; // @[LoadOrderBuffer.scala:46:20] entries_7_tail <= io_entry_tail; // @[LoadOrderBuffer.scala:46:20] entries_7_masked <= io_entry_masked; // @[LoadOrderBuffer.scala:46:20] entries_7_lsiq_id <= io_entry_lsiq_id; // @[LoadOrderBuffer.scala:46:20] entries_7_page_offset <= io_entry_page_offset; // @[LoadOrderBuffer.scala:46:20] end if (~_GEN_20 | rob_full | ~_GEN_21) begin // @[LoadOrderBuffer.scala:47:21, :76:{23,81}, :77:21, :78:37, :81:34] end else // @[LoadOrderBuffer.scala:47:21, :76:81, :77:21, :81:34] rob_idxs_0 <= rob_next; // @[Mux.scala:50:70] if (~_GEN_20 | rob_full | ~_GEN_22) begin // @[LoadOrderBuffer.scala:47:21, :76:{23,81}, :77:21, :78:37, :81:34] end else // @[LoadOrderBuffer.scala:47:21, :76:81, :77:21, :81:34] rob_idxs_1 <= rob_next; // @[Mux.scala:50:70] if (~_GEN_20 | rob_full | ~_GEN_23) begin // @[LoadOrderBuffer.scala:47:21, :76:{23,81}, :77:21, :78:37, :81:34] end else // @[LoadOrderBuffer.scala:47:21, :76:81, :77:21, :81:34] rob_idxs_2 <= rob_next; // @[Mux.scala:50:70] if (~_GEN_20 | rob_full | ~_GEN_24) begin // @[LoadOrderBuffer.scala:47:21, :76:{23,81}, :77:21, :78:37, :81:34] end else // @[LoadOrderBuffer.scala:47:21, :76:81, :77:21, :81:34] rob_idxs_3 <= rob_next; // @[Mux.scala:50:70] if (~_GEN_20 | rob_full 
| ~_GEN_25) begin // @[LoadOrderBuffer.scala:47:21, :76:{23,81}, :77:21, :78:37, :81:34] end else // @[LoadOrderBuffer.scala:47:21, :76:81, :77:21, :81:34] rob_idxs_4 <= rob_next; // @[Mux.scala:50:70] if (~_GEN_20 | rob_full | ~_GEN_26) begin // @[LoadOrderBuffer.scala:47:21, :76:{23,81}, :77:21, :78:37, :81:34] end else // @[LoadOrderBuffer.scala:47:21, :76:81, :77:21, :81:34] rob_idxs_5 <= rob_next; // @[Mux.scala:50:70] if (~_GEN_20 | rob_full | ~_GEN_27) begin // @[LoadOrderBuffer.scala:47:21, :76:{23,81}, :77:21, :78:37, :81:34] end else // @[LoadOrderBuffer.scala:47:21, :76:81, :77:21, :81:34] rob_idxs_6 <= rob_next; // @[Mux.scala:50:70] if (~_GEN_20 | rob_full | ~(&io_push_bits_tag)) begin // @[LoadOrderBuffer.scala:47:21, :76:{23,81}, :77:21, :78:37, :81:34] end else // @[LoadOrderBuffer.scala:47:21, :76:81, :77:21, :81:34] rob_idxs_7 <= rob_next; // @[Mux.scala:50:70] if (~_GEN_20 | rob_full | ~_GEN_28) begin // @[LoadOrderBuffer.scala:47:21, :48:16, :76:{23,81}, :77:21, :82:32, :83:25] end else // @[LoadOrderBuffer.scala:48:16, :76:81, :77:21, :83:25] rob_0 <= io_push_bits_data; // @[LoadOrderBuffer.scala:48:16] if (~_GEN_20 | rob_full | ~_GEN_29) begin // @[LoadOrderBuffer.scala:47:21, :48:16, :76:{23,81}, :77:21, :82:32, :83:25] end else // @[LoadOrderBuffer.scala:48:16, :76:81, :77:21, :83:25] rob_1 <= io_push_bits_data; // @[LoadOrderBuffer.scala:48:16] if (~_GEN_20 | rob_full | ~_GEN_30) begin // @[LoadOrderBuffer.scala:47:21, :48:16, :76:{23,81}, :77:21, :82:32, :83:25] end else // @[LoadOrderBuffer.scala:48:16, :76:81, :77:21, :83:25] rob_2 <= io_push_bits_data; // @[LoadOrderBuffer.scala:48:16] if (~_GEN_20 | rob_full | ~(&rob_next)) begin // @[Mux.scala:50:70] end else // @[LoadOrderBuffer.scala:48:16, :76:81, :77:21, :83:25] rob_3 <= io_push_bits_data; // @[LoadOrderBuffer.scala:48:16] always @(posedge)
Generate the Verilog code corresponding to the following Chisel files. File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
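GrayCounter above encodes the write and read indices as Gray code (incremented ^ (incremented >> 1)) so that only one bit of the pointer changes per increment; a synchronizer that samples the pointer mid-transition therefore resolves to either the old or the new value, never to a torn third value. Below is a small plain-Scala check of that one-bit-per-step property, offered only as a sketch; the names and the 4-bit width (log2Ceil(depth) + 1 for an 8-deep queue) are illustrative.

object GrayCodeSketch {
  // Binary-reflected Gray code, same formula as GrayCounter: g = b ^ (b >> 1).
  def toGray(b: Int): Int = b ^ (b >>> 1)

  def main(args: Array[String]): Unit = {
    val bits = 4
    val codes = (0 until (1 << bits)).map(toGray)
    // Successive codes, including the wrap from the maximum count back to 0,
    // differ in exactly one bit position.
    val ok = codes.indices.forall { i =>
      Integer.bitCount(codes(i) ^ codes((i + 1) % codes.length)) == 1
    }
    println(s"adjacent Gray codes differ by exactly one bit: $ok") // prints: true
  }
}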
module AsyncQueue_1( // @[AsyncQueue.scala:226:7] input io_enq_clock, // @[AsyncQueue.scala:227:14] input io_enq_reset, // @[AsyncQueue.scala:227:14] output io_enq_ready, // @[AsyncQueue.scala:227:14] input io_enq_valid, // @[AsyncQueue.scala:227:14] input [31:0] io_enq_bits_phit, // @[AsyncQueue.scala:227:14] input io_deq_clock, // @[AsyncQueue.scala:227:14] input io_deq_reset, // @[AsyncQueue.scala:227:14] input io_deq_ready, // @[AsyncQueue.scala:227:14] output io_deq_valid, // @[AsyncQueue.scala:227:14] output [31:0] io_deq_bits_phit // @[AsyncQueue.scala:227:14] ); wire [3:0] _sink_io_async_ridx; // @[AsyncQueue.scala:229:70] wire _sink_io_async_safe_ridx_valid; // @[AsyncQueue.scala:229:70] wire _sink_io_async_safe_sink_reset_n; // @[AsyncQueue.scala:229:70] wire [31:0] _source_io_async_mem_0_phit; // @[AsyncQueue.scala:228:70] wire [31:0] _source_io_async_mem_1_phit; // @[AsyncQueue.scala:228:70] wire [31:0] _source_io_async_mem_2_phit; // @[AsyncQueue.scala:228:70] wire [31:0] _source_io_async_mem_3_phit; // @[AsyncQueue.scala:228:70] wire [31:0] _source_io_async_mem_4_phit; // @[AsyncQueue.scala:228:70] wire [31:0] _source_io_async_mem_5_phit; // @[AsyncQueue.scala:228:70] wire [31:0] _source_io_async_mem_6_phit; // @[AsyncQueue.scala:228:70] wire [31:0] _source_io_async_mem_7_phit; // @[AsyncQueue.scala:228:70] wire [3:0] _source_io_async_widx; // @[AsyncQueue.scala:228:70] wire _source_io_async_safe_widx_valid; // @[AsyncQueue.scala:228:70] wire _source_io_async_safe_source_reset_n; // @[AsyncQueue.scala:228:70] wire io_enq_clock_0 = io_enq_clock; // @[AsyncQueue.scala:226:7] wire io_enq_reset_0 = io_enq_reset; // @[AsyncQueue.scala:226:7] wire io_enq_valid_0 = io_enq_valid; // @[AsyncQueue.scala:226:7] wire [31:0] io_enq_bits_phit_0 = io_enq_bits_phit; // @[AsyncQueue.scala:226:7] wire io_deq_clock_0 = io_deq_clock; // @[AsyncQueue.scala:226:7] wire io_deq_reset_0 = io_deq_reset; // @[AsyncQueue.scala:226:7] wire io_deq_ready_0 = io_deq_ready; // @[AsyncQueue.scala:226:7] wire io_enq_ready_0; // @[AsyncQueue.scala:226:7] wire [31:0] io_deq_bits_phit_0; // @[AsyncQueue.scala:226:7] wire io_deq_valid_0; // @[AsyncQueue.scala:226:7] AsyncQueueSource_Phit_1 source ( // @[AsyncQueue.scala:228:70] .clock (io_enq_clock_0), // @[AsyncQueue.scala:226:7] .reset (io_enq_reset_0), // @[AsyncQueue.scala:226:7] .io_enq_ready (io_enq_ready_0), .io_enq_valid (io_enq_valid_0), // @[AsyncQueue.scala:226:7] .io_enq_bits_phit (io_enq_bits_phit_0), // @[AsyncQueue.scala:226:7] .io_async_mem_0_phit (_source_io_async_mem_0_phit), .io_async_mem_1_phit (_source_io_async_mem_1_phit), .io_async_mem_2_phit (_source_io_async_mem_2_phit), .io_async_mem_3_phit (_source_io_async_mem_3_phit), .io_async_mem_4_phit (_source_io_async_mem_4_phit), .io_async_mem_5_phit (_source_io_async_mem_5_phit), .io_async_mem_6_phit (_source_io_async_mem_6_phit), .io_async_mem_7_phit (_source_io_async_mem_7_phit), .io_async_ridx (_sink_io_async_ridx), // @[AsyncQueue.scala:229:70] .io_async_widx (_source_io_async_widx), .io_async_safe_ridx_valid (_sink_io_async_safe_ridx_valid), // @[AsyncQueue.scala:229:70] .io_async_safe_widx_valid (_source_io_async_safe_widx_valid), .io_async_safe_source_reset_n (_source_io_async_safe_source_reset_n), .io_async_safe_sink_reset_n (_sink_io_async_safe_sink_reset_n) // @[AsyncQueue.scala:229:70] ); // @[AsyncQueue.scala:228:70] AsyncQueueSink_Phit_1 sink ( // @[AsyncQueue.scala:229:70] .clock (io_deq_clock_0), // @[AsyncQueue.scala:226:7] .reset (io_deq_reset_0), // @[AsyncQueue.scala:226:7] 
.io_deq_ready (io_deq_ready_0), // @[AsyncQueue.scala:226:7] .io_deq_valid (io_deq_valid_0), .io_deq_bits_phit (io_deq_bits_phit_0), .io_async_mem_0_phit (_source_io_async_mem_0_phit), // @[AsyncQueue.scala:228:70] .io_async_mem_1_phit (_source_io_async_mem_1_phit), // @[AsyncQueue.scala:228:70] .io_async_mem_2_phit (_source_io_async_mem_2_phit), // @[AsyncQueue.scala:228:70] .io_async_mem_3_phit (_source_io_async_mem_3_phit), // @[AsyncQueue.scala:228:70] .io_async_mem_4_phit (_source_io_async_mem_4_phit), // @[AsyncQueue.scala:228:70] .io_async_mem_5_phit (_source_io_async_mem_5_phit), // @[AsyncQueue.scala:228:70] .io_async_mem_6_phit (_source_io_async_mem_6_phit), // @[AsyncQueue.scala:228:70] .io_async_mem_7_phit (_source_io_async_mem_7_phit), // @[AsyncQueue.scala:228:70] .io_async_ridx (_sink_io_async_ridx), .io_async_widx (_source_io_async_widx), // @[AsyncQueue.scala:228:70] .io_async_safe_ridx_valid (_sink_io_async_safe_ridx_valid), .io_async_safe_widx_valid (_source_io_async_safe_widx_valid), // @[AsyncQueue.scala:228:70] .io_async_safe_source_reset_n (_source_io_async_safe_source_reset_n), // @[AsyncQueue.scala:228:70] .io_async_safe_sink_reset_n (_sink_io_async_safe_sink_reset_n) ); // @[AsyncQueue.scala:229:70] assign io_enq_ready = io_enq_ready_0; // @[AsyncQueue.scala:226:7] assign io_deq_valid = io_deq_valid_0; // @[AsyncQueue.scala:226:7] assign io_deq_bits_phit = io_deq_bits_phit_0; // @[AsyncQueue.scala:226:7] endmodule
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
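AsyncResetSynchronizerShiftReg is the same helper the AsyncQueue source and sink above lean on for their clock-domain crossings (for example AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))). As a usage illustration only, here is a minimal Chisel sketch that synchronizes a single asynchronous level signal into the local clock domain; the wrapper module and signal names are invented for this example.

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Illustrative wrapper: pass one asynchronous level signal through a 3-deep,
// asynchronously reset synchronizer chain before using it in this clock domain.
class ExternalLevelSync extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())  // driven from another clock domain
    val sync_out = Output(Bool()) // safe to sample in this clock domain
  })
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, 3, Some("ext_level_sync"))
}

Wrapping the chain in dedicated modules, rather than inlining RegNext chains, is what lets backend flows recognize and constrain these flops as CDC synchronizers, which is the stated purpose of these helpers.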
module AsyncResetSynchronizerShiftReg_w4_d3_i0_11( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input [3:0] io_d, // @[ShiftReg.scala:36:14] output [3:0] io_q // @[ShiftReg.scala:36:14] ); wire [3:0] io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_2 = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_4 = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_6 = reset; // @[SynchronizerReg.scala:86:21] wire [3:0] _io_q_T; // @[SynchronizerReg.scala:90:14] wire [3:0] io_q_0; // @[SynchronizerReg.scala:80:7] wire _output_T_1 = io_d_0[0]; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire _output_T_3 = io_d_0[1]; // @[SynchronizerReg.scala:80:7, :87:41] wire output_1; // @[ShiftReg.scala:48:24] wire _output_T_5 = io_d_0[2]; // @[SynchronizerReg.scala:80:7, :87:41] wire output_2; // @[ShiftReg.scala:48:24] wire _output_T_7 = io_d_0[3]; // @[SynchronizerReg.scala:80:7, :87:41] wire output_3; // @[ShiftReg.scala:48:24] wire [1:0] io_q_lo = {output_1, output_0}; // @[SynchronizerReg.scala:90:14] wire [1:0] io_q_hi = {output_3, output_2}; // @[SynchronizerReg.scala:90:14] assign _io_q_T = {io_q_hi, io_q_lo}; // @[SynchronizerReg.scala:90:14] assign io_q_0 = _io_q_T; // @[SynchronizerReg.scala:80:7, :90:14] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_122 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_123 output_chain_1 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T_2), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_3), // @[SynchronizerReg.scala:87:41] .io_q (output_1) ); // @[ShiftReg.scala:45:23] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_124 output_chain_2 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T_4), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_5), // @[SynchronizerReg.scala:87:41] .io_q (output_2) ); // @[ShiftReg.scala:45:23] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_125 output_chain_3 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T_6), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_7), // @[SynchronizerReg.scala:87:41] .io_q (output_3) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
Generate the Verilog code corresponding to the following Chisel files. File UnsafeAXI4ToTL.scala: package ara import chisel3._ import chisel3.util._ import freechips.rocketchip.amba._ import freechips.rocketchip.amba.axi4._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle { val data = UInt(dataWidth.W) val resp = UInt(respWidth.W) val last = Bool() val user = BundleMap(userFields) } /** Parameters for [[BaseReservableListBuffer]] and all child classes. * * @param numEntries Total number of elements that can be stored in the 'data' RAM * @param numLists Maximum number of linked lists * @param numBeats Maximum number of beats per entry */ case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) { // Avoid zero-width wires when we call 'log2Ceil' val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries) val listBits = if (numLists == 1) 1 else log2Ceil(numLists) val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats) } case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName) extends MixedAdapterNode(AXI4Imp, TLImp)( dFn = { case mp => TLMasterPortParameters.v2( masters = mp.masters.zipWithIndex.map { case (m, i) => // Support 'numTlTxns' read requests and 'numTlTxns' write requests at once. val numSourceIds = numTlTxns * 2 TLMasterParameters.v2( name = m.name, sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds), nodePath = m.nodePath ) }, echoFields = mp.echoFields, requestFields = AMBAProtField() +: mp.requestFields, responseKeys = mp.responseKeys ) }, uFn = { mp => AXI4SlavePortParameters( slaves = mp.managers.map { m => val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits)) AXI4SlaveParameters( address = m.address, resources = m.resources, regionType = m.regionType, executable = m.executable, nodePath = m.nodePath, supportsWrite = m.supportsPutPartial.intersect(maxXfer), supportsRead = m.supportsGet.intersect(maxXfer), interleavedId = Some(0) // TL2 never interleaves D beats ) }, beatBytes = mp.beatBytes, minLatency = mp.minLatency, responseFields = mp.responseFields, requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt) ) } ) class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule { require(numTlTxns >= 1) require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2") val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => edgeIn.master.masters.foreach { m => require(m.aligned, "AXI4ToTL requires aligned requests") } val numIds = edgeIn.master.endId val beatBytes = edgeOut.slave.beatBytes val maxTransfer = edgeOut.slave.maxTransfer val maxBeats = maxTransfer / beatBytes // Look for an Error device to redirect bad requests val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError") require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. 
One must be instantiated.") val errorDev = errorDevs.maxBy(_.maxTransfer) val errorDevAddr = errorDev.address.head.base require( errorDev.supportsPutPartial.contains(maxTransfer), s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer" ) require( errorDev.supportsGet.contains(maxTransfer), s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer" ) // All of the read-response reordering logic. val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields) val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats) val listBuffer = if (numTlTxns > 1) { Module(new ReservableListBuffer(listBufData, listBufParams)) } else { Module(new PassthroughListBuffer(listBufData, listBufParams)) } // To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to // 0 for read requests and 1 for write requests. val isReadSourceBit = 0.U(1.W) val isWriteSourceBit = 1.U(1.W) /* Read request logic */ val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val rBytes1 = in.ar.bits.bytes1() val rSize = OH1ToUInt(rBytes1) val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize) val rId = if (numTlTxns > 1) { Cat(isReadSourceBit, listBuffer.ioReservedIndex) } else { isReadSourceBit } val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Indicates if there are still valid TileLink source IDs left to use. val canIssueR = listBuffer.ioReserve.ready listBuffer.ioReserve.bits := in.ar.bits.id listBuffer.ioReserve.valid := in.ar.valid && rOut.ready in.ar.ready := rOut.ready && canIssueR rOut.valid := in.ar.valid && canIssueR rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2 rOut.bits.user :<= in.ar.bits.user rOut.bits.user.lift(AMBAProt).foreach { rProt => rProt.privileged := in.ar.bits.prot(0) rProt.secure := !in.ar.bits.prot(1) rProt.fetch := in.ar.bits.prot(2) rProt.bufferable := in.ar.bits.cache(0) rProt.modifiable := in.ar.bits.cache(1) rProt.readalloc := in.ar.bits.cache(2) rProt.writealloc := in.ar.bits.cache(3) } /* Write request logic */ // Strip off the MSB, which identifies the transaction as read vs write. val strippedResponseSourceId = if (numTlTxns > 1) { out.d.bits.source((out.d.bits.source).getWidth - 2, 0) } else { // When there's only 1 TileLink transaction allowed for read/write, then this field is always 0. 0.U(1.W) } // Track when a write request burst is in progress. val writeBurstBusy = RegInit(false.B) when(in.w.fire) { writeBurstBusy := !in.w.bits.last } val usedWriteIds = RegInit(0.U(numTlTxns.W)) val canIssueW = !usedWriteIds.andR val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W)) val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W)) usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet // Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't // change mid-burst. 
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W)) val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy val freeWriteIdIndex = OHToUInt(freeWriteIdOH) freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val wBytes1 = in.aw.bits.bytes1() val wSize = OH1ToUInt(wBytes1) val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize) val wId = if (numTlTxns > 1) { Cat(isWriteSourceBit, freeWriteIdIndex) } else { isWriteSourceBit } val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain // asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but // the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb // bits during a W-channel burst. in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW in.w.ready := wOut.ready && in.aw.valid && canIssueW wOut.valid := in.aw.valid && in.w.valid && canIssueW wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2 in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ } wOut.bits.user :<= in.aw.bits.user wOut.bits.user.lift(AMBAProt).foreach { wProt => wProt.privileged := in.aw.bits.prot(0) wProt.secure := !in.aw.bits.prot(1) wProt.fetch := in.aw.bits.prot(2) wProt.bufferable := in.aw.bits.cache(0) wProt.modifiable := in.aw.bits.cache(1) wProt.readalloc := in.aw.bits.cache(2) wProt.writealloc := in.aw.bits.cache(3) } // Merge the AXI4 read/write requests into the TL-A channel. TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut)) /* Read/write response logic */ val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle))) val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle))) val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY) val dHasData = edgeOut.hasData(out.d.bits) val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d) val dNumBeats1 = edgeOut.numBeats1(out.d.bits) // Handle cases where writeack arrives before write is done val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck) listBuffer.ioDataOut.ready := okR.ready okR.valid := listBuffer.ioDataOut.valid okB.valid := out.d.valid && !dHasData && !writeEarlyAck listBuffer.ioResponse.valid := out.d.valid && dHasData listBuffer.ioResponse.bits.index := strippedResponseSourceId listBuffer.ioResponse.bits.data.data := out.d.bits.data listBuffer.ioResponse.bits.data.resp := dResp listBuffer.ioResponse.bits.data.last := dLast listBuffer.ioResponse.bits.data.user :<= out.d.bits.user listBuffer.ioResponse.bits.count := dCount listBuffer.ioResponse.bits.numBeats1 := dNumBeats1 okR.bits.id := listBuffer.ioDataOut.bits.listIndex okR.bits.data := listBuffer.ioDataOut.bits.payload.data okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp okR.bits.last := listBuffer.ioDataOut.bits.payload.last okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user // Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write // response, mark the write transaction as complete. 
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W)) val writeResponseId = writeIdMap.read(strippedResponseSourceId) when(wOut.fire) { writeIdMap.write(freeWriteIdIndex, in.aw.bits.id) } when(edgeOut.done(wOut)) { usedWriteIdsSet := freeWriteIdOH } when(okB.fire) { usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns) } okB.bits.id := writeResponseId okB.bits.resp := dResp okB.bits.user :<= out.d.bits.user // AXI4 needs irrevocable behaviour in.r <> Queue.irrevocable(okR, 1, flow = true) in.b <> Queue.irrevocable(okB, 1, flow = true) // Unused channels out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B /* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */ def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = { val lReqType = reqType.toLowerCase when(a.valid) { assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U) // Narrow transfers and FIXED bursts must be single-beat bursts. when(a.bits.len =/= 0.U) { assert( a.bits.size === log2Ceil(beatBytes).U, s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)", 1.U << a.bits.size, a.bits.len + 1.U ) assert( a.bits.burst =/= AXI4Parameters.BURST_FIXED, s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)", a.bits.len + 1.U ) } // Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in // particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink // Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts. } } checkRequest(in.ar, "Read") checkRequest(in.aw, "Write") } } } object UnsafeAXI4ToTL { def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = { val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt)) axi42tl.node } } /* ReservableListBuffer logic, and associated classes. */ class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle { val index = UInt(params.entryBits.W) val count = UInt(params.beatBits.W) val numBeats1 = UInt(params.beatBits.W) } class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle { val listIndex = UInt(params.listBits.W) } /** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */ abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends Module { require(params.numEntries > 0) require(params.numLists > 0) val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W)))) val ioReservedIndex = IO(Output(UInt(params.entryBits.W))) val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params)))) val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params))) } /** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve * linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the * 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a * given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order. 
* * ==Constructor== * @param gen Chisel type of linked list data element * @param params Other parameters * * ==Module IO== * @param ioReserve Index of list to reserve a new element in * @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire' * @param ioResponse Payload containing response data and linked-list-entry index * @param ioDataOut Payload containing data read from response linked list and linked list index */ class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { val valid = RegInit(0.U(params.numLists.W)) val head = Mem(params.numLists, UInt(params.entryBits.W)) val tail = Mem(params.numLists, UInt(params.entryBits.W)) val used = RegInit(0.U(params.numEntries.W)) val next = Mem(params.numEntries, UInt(params.entryBits.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) } val dataIsPresent = RegInit(0.U(params.numEntries.W)) val beats = Mem(params.numEntries, UInt(params.beatBits.W)) // The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower. val dataMemReadEnable = WireDefault(false.B) val dataMemWriteEnable = WireDefault(false.B) assert(!(dataMemReadEnable && dataMemWriteEnable)) // 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the // lowest-index entry in the 'data' RAM which is free. val freeOH = Wire(UInt(params.numEntries.W)) val freeIndex = OHToUInt(freeOH) freeOH := ~(leftOR(~used) << 1) & ~used ioReservedIndex := freeIndex val validSet = WireDefault(0.U(params.numLists.W)) val validClr = WireDefault(0.U(params.numLists.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) val dataIsPresentSet = WireDefault(0.U(params.numEntries.W)) val dataIsPresentClr = WireDefault(0.U(params.numEntries.W)) valid := (valid & ~validClr) | validSet used := (used & ~usedClr) | usedSet dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet /* Reservation logic signals */ val reserveTail = Wire(UInt(params.entryBits.W)) val reserveIsValid = Wire(Bool()) /* Response logic signals */ val responseIndex = Wire(UInt(params.entryBits.W)) val responseListIndex = Wire(UInt(params.listBits.W)) val responseHead = Wire(UInt(params.entryBits.W)) val responseTail = Wire(UInt(params.entryBits.W)) val nextResponseHead = Wire(UInt(params.entryBits.W)) val nextDataIsPresent = Wire(Bool()) val isResponseInOrder = Wire(Bool()) val isEndOfList = Wire(Bool()) val isLastBeat = Wire(Bool()) val isLastResponseBeat = Wire(Bool()) val isLastUnwindBeat = Wire(Bool()) /* Reservation logic */ reserveTail := tail.read(ioReserve.bits) reserveIsValid := valid(ioReserve.bits) ioReserve.ready := !used.andR // When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we // actually start a new list, rather than appending to a list that's about to disappear. 
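// [Editor's illustrative note, not part of the original file] A concrete (hypothetical) scenario for the corner case
// described above: a list holds exactly one entry, and its final beat leaves through 'ioDataOut' on the same cycle
// that a new reservation for that list arrives on 'ioReserve'. Chaining the new entry onto the departing tail via
// 'next' would leave the list pointing at a freed entry, so the reservation must instead start a fresh list by
// rewriting 'head' -- which is what the 'appendToAndDestroyList' term below selects.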
val reserveResponseSameList = ioReserve.bits === responseListIndex val appendToAndDestroyList = ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat when(ioReserve.fire) { validSet := UIntToOH(ioReserve.bits, params.numLists) usedSet := freeOH when(reserveIsValid && !appendToAndDestroyList) { next.write(reserveTail, freeIndex) }.otherwise { head.write(ioReserve.bits, freeIndex) } tail.write(ioReserve.bits, freeIndex) map.write(freeIndex, ioReserve.bits) } /* Response logic */ // The majority of the response logic (reading from and writing to the various RAMs) is common between the // response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid). // The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the // 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and // response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after // two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker. responseHead := head.read(responseListIndex) responseTail := tail.read(responseListIndex) nextResponseHead := next.read(responseIndex) nextDataIsPresent := dataIsPresent(nextResponseHead) // Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since // there isn't a next element in the linked list. isResponseInOrder := responseHead === responseIndex isEndOfList := responseHead === responseTail isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1 // When a response's last beat is sent to the output channel, mark it as completed. This can happen in two // situations: // 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM // reservation was never needed. // 2. An entry is read out of the 'data' SRAM (within the unwind FSM). when(ioDataOut.fire && isLastBeat) { // Mark the reservation as no-longer-used. usedClr := UIntToOH(responseIndex, params.numEntries) // If the response is in-order, then we're popping an element from this linked list. when(isEndOfList) { // Once we pop the last element from a linked list, mark it as no-longer-present. validClr := UIntToOH(responseListIndex, params.numLists) }.otherwise { // Move the linked list's head pointer to the new head pointer. head.write(responseListIndex, nextResponseHead) } } // If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding. when(ioResponse.fire && !isResponseInOrder) { dataMemWriteEnable := true.B when(isLastResponseBeat) { dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries) beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1) } } // Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to. 
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats) (responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) => when(select && dataMemWriteEnable) { seqMem.write(ioResponse.bits.index, ioResponse.bits.data) } } /* Response unwind logic */ // Unwind FSM state definitions val sIdle :: sUnwinding :: Nil = Enum(2) val unwindState = RegInit(sIdle) val busyUnwinding = unwindState === sUnwinding val startUnwind = Wire(Bool()) val stopUnwind = Wire(Bool()) when(startUnwind) { unwindState := sUnwinding }.elsewhen(stopUnwind) { unwindState := sIdle } assert(!(startUnwind && stopUnwind)) // Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to // become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is // invalid. // // Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to // worry about overwriting the 'data' SRAM's output when we start the unwind FSM. startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent // Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of // two things happens: // 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent) // 2. There are no more outstanding responses in this list (isEndOfList) // // Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are // passing from 'ioResponse' to 'ioDataOut'. stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList) val isUnwindBurstOver = Wire(Bool()) val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable) // Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of // beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we // increment 'beatCounter' until it reaches 'unwindBeats1'. val unwindBeats1 = Reg(UInt(params.beatBits.W)) val nextBeatCounter = Wire(UInt(params.beatBits.W)) val beatCounter = RegNext(nextBeatCounter) isUnwindBurstOver := beatCounter === unwindBeats1 when(startNewBurst) { unwindBeats1 := beats.read(nextResponseHead) nextBeatCounter := 0.U }.elsewhen(dataMemReadEnable) { nextBeatCounter := beatCounter + 1.U }.otherwise { nextBeatCounter := beatCounter } // When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next // entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which // happens at the start of reading a new stored burst). val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst) responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index) // Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the // SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead // holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'. val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex) // The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. 
The SRAM's output stays valid // until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle). val unwindDataIsValid = RegInit(false.B) when(dataMemReadEnable) { unwindDataIsValid := true.B }.elsewhen(ioDataOut.fire) { unwindDataIsValid := false.B } isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid // Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats. isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat) // Select which SRAM to read from based on the beat counter. val dataOutputVec = Wire(Vec(params.numBeats, gen)) val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats) (nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) => dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable) } // Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured // by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading // from. val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable) // Mark 'data' burst entries as no-longer-present as they get read out of the SRAM. when(dataMemReadEnable) { dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries) } // As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue // a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know // we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be // consumed by the output channel). val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem) // While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need // 'responseListIndex' to be coherent for the entire unwind process. val rawResponseListIndex = map.read(responseIndex) val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst) responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex) // Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are // just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that // could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be // single-ported. ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding // Either pass an in-order response to the output channel, or data read from the unwind FSM. ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder) ioDataOut.bits.listIndex := responseListIndex ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data) // It's an error to get a response that isn't associated with a valid linked list. when(ioResponse.fire || unwindDataIsValid) { assert( valid(responseListIndex), "No linked list exists at index %d, mapped from %d", responseListIndex, responseIndex ) } when(busyUnwinding && dataMemReadEnable) { assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order") } } /** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1. 
* * Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to * reorder any responses, or store any linked lists. */ class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1") val used = RegInit(0.U(params.numEntries.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) used := (used & ~usedClr) | usedSet ioReserve.ready := used === 0.U // Store which list index was reserved, we need to return this value when we get a response. when(ioReserve.fire) { usedSet := 1.U map.write(0.U, ioReserve.bits) } // There's only one valid linked list entry, which is at index 0. ioReservedIndex := 0.U val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1 // Mark the linked list as empty when we get the last beat in a response. // Note that 'ioResponse.fire === ioDataOut.fire'. when(ioResponse.fire && isLastResponseBeat) { usedClr := 1.U } // Always pass the response data straight through, since we never need to reorder the response data. ioDataOut.bits.listIndex := map.read(0.U) ioDataOut.bits.payload := ioResponse.bits.data ioDataOut.valid := ioResponse.valid ioResponse.ready := ioDataOut.ready }
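Both the write-ID allocator (freeWriteIdOHRaw) and ReservableListBuffer's freeOH above pick the lowest-index free slot with the same leftOR bit trick. The following is a minimal, self-contained sketch of just that idiom; the module and port names are invented for illustration, and it assumes rocket-chip's leftOR helper is available via freechips.rocketchip.util._:

import chisel3._
import freechips.rocketchip.util._  // assumed to provide leftOR (propagates each set bit toward the MSB)

class LowestFreeSlot(n: Int) extends Module {
  val io = IO(new Bundle {
    val used   = Input(UInt(n.W))   // bitmap of occupied slots
    val freeOH = Output(UInt(n.W))  // one-hot: lowest-index slot that is still free
  })
  // leftOR(~used) sets every bit at or above the lowest free slot; shifting left by one and inverting clears
  // everything above that slot, and the final mask with ~used leaves exactly one bit set.
  // Example: used = b0101 -> freeOH = b0010, i.e. slot 1, the lowest free index.
  io.freeOH := ~(leftOR(~io.used) << 1) & ~io.used
}

The same expression appears twice in the file above, once over 'usedWriteIds' and once over 'used'.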
module dataMems_472( // @[UnsafeAXI4ToTL.scala:365:62]
  input  [4:0]  R0_addr,
  input         R0_en,
  input         R0_clk,
  output [66:0] R0_data,
  input  [4:0]  W0_addr,
  input         W0_en,
  input         W0_clk,
  input  [66:0] W0_data
);
  dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
    .R0_addr (R0_addr),
    .R0_en   (R0_en),
    .R0_clk  (R0_clk),
    .R0_data (R0_data),
    .W0_addr (W0_addr),
    .W0_en   (W0_en),
    .W0_clk  (W0_clk),
    .W0_data (W0_data)
  ); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule
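For context on the memory above: each element of 'dataMems' in ReservableListBuffer is a SyncReadMem with one synchronous read port and one write port, which is the kind of memory that lowers to an R0_*/W0_* wrapper like this one. A minimal standalone sketch follows; the module and port names are invented, and the 32 x 67-bit geometry is simply chosen to match this particular instance:

import chisel3._

class OneReadOneWriteMem extends Module {
  val io = IO(new Bundle {
    val raddr = Input(UInt(5.W))
    val ren   = Input(Bool())
    val rdata = Output(UInt(67.W))
    val waddr = Input(UInt(5.W))
    val wen   = Input(Bool())
    val wdata = Input(UInt(67.W))
  })
  val mem = SyncReadMem(32, UInt(67.W))
  // Synchronous read: data appears one cycle after 'ren' is asserted, matching the clocked R0_* port group.
  io.rdata := mem.read(io.raddr, io.ren)
  when(io.wen) { mem.write(io.waddr, io.wdata) }
}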
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
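As a usage illustration for the synchronizer helpers above (the class and signal names here are invented), a level signal arriving from another clock domain would normally be brought in through the companion object's apply method rather than by instantiating the module directly:

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class LevelSync extends Module {
  val io = IO(new Bundle {
    val asyncIn = Input(Bool())   // level signal crossing into this clock domain
    val syncOut = Output(Bool())
  })
  // Three-flop synchronizer chain whose stages are asynchronously reset to 0.
  io.syncOut := AsyncResetSynchronizerShiftReg(io.asyncIn, sync = 3, init = 0, name = Some("sync_in"))
}

The AsyncResetSynchronizerShiftReg_w1_d3_i0 module that follows encodes exactly these width/depth/init parameters in its name, per the desiredName override above.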
module AsyncResetSynchronizerShiftReg_w1_d3_i0_133( // @[SynchronizerReg.scala:80:7]
  input  clock, // @[SynchronizerReg.scala:80:7]
  input  reset, // @[SynchronizerReg.scala:80:7]
  output io_q // @[ShiftReg.scala:36:14]
);
  wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
  wire io_d = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
  wire _output_T_1 = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
  wire output_0; // @[ShiftReg.scala:48:24]
  wire io_q_0; // @[SynchronizerReg.scala:80:7]
  assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
  AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_229 output_chain ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (_output_T), // @[SynchronizerReg.scala:86:21]
    .io_q  (output_0)
  ); // @[ShiftReg.scala:45:23]
  assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule
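The io_d net above is tied to 1'h1, so this instance appears to have had its data input driven with a constant during elaboration; only the reset behavior of the chain remains visible. Behaviorally, the instantiated AsyncResetSynchronizerPrimitiveShiftReg_d3_i0 is three asynchronously-reset flops in series. A reduced sketch of that chain (class name invented; this mirrors SynchronizerPrimitiveShiftReg with sync = 3 and init = false under an async reset):

import chisel3._

class ThreeFlopAsyncSync extends Module with RequireAsyncReset {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // The last stage samples the asynchronous input and the first stage drives the output,
  // matching the chain.last := io.d / io.q := chain.head wiring of the original.
  val sync_2 = RegNext(io.d,   false.B)
  val sync_1 = RegNext(sync_2, false.B)
  val sync_0 = RegNext(sync_1, false.B)
  io.q := sync_0
}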
Generate the Verilog code corresponding to the following Chisel files. File IngressUnit.scala: package constellation.router import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.util._ import constellation.channel._ class IngressUnit( ingressNodeId: Int, cParam: IngressChannelParams, outParams: Seq[ChannelParams], egressParams: Seq[EgressChannelParams], combineRCVA: Boolean, combineSAST: Boolean, ) (implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) { class IngressUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) { val in = Flipped(Decoupled(new IngressFlit(cParam.payloadBits))) } val io = IO(new IngressUnitIO) val route_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2)) val route_q = Module(new Queue(new RouteComputerResp(outParams, egressParams), 2, flow=combineRCVA)) assert(!(io.in.valid && !cParam.possibleFlows.toSeq.map(_.egressId.U === io.in.bits.egress_id).orR)) route_buffer.io.enq.bits.head := io.in.bits.head route_buffer.io.enq.bits.tail := io.in.bits.tail val flows = cParam.possibleFlows.toSeq if (flows.size == 0) { route_buffer.io.enq.bits.flow := DontCare } else { route_buffer.io.enq.bits.flow.ingress_node := cParam.destId.U route_buffer.io.enq.bits.flow.ingress_node_id := ingressNodeId.U route_buffer.io.enq.bits.flow.vnet_id := cParam.vNetId.U route_buffer.io.enq.bits.flow.egress_node := Mux1H( flows.map(_.egressId.U === io.in.bits.egress_id), flows.map(_.egressNode.U) ) route_buffer.io.enq.bits.flow.egress_node_id := Mux1H( flows.map(_.egressId.U === io.in.bits.egress_id), flows.map(_.egressNodeId.U) ) } route_buffer.io.enq.bits.payload := io.in.bits.payload route_buffer.io.enq.bits.virt_channel_id := DontCare io.router_req.bits.src_virt_id := 0.U io.router_req.bits.flow := route_buffer.io.enq.bits.flow val at_dest = route_buffer.io.enq.bits.flow.egress_node === nodeId.U route_buffer.io.enq.valid := io.in.valid && ( io.router_req.ready || !io.in.bits.head || at_dest) io.router_req.valid := io.in.valid && route_buffer.io.enq.ready && io.in.bits.head && !at_dest io.in.ready := route_buffer.io.enq.ready && ( io.router_req.ready || !io.in.bits.head || at_dest) route_q.io.enq.valid := io.router_req.fire route_q.io.enq.bits := io.router_resp when (io.in.fire && io.in.bits.head && at_dest) { route_q.io.enq.valid := true.B route_q.io.enq.bits.vc_sel.foreach(_.foreach(_ := false.B)) for (o <- 0 until nEgress) { when (egressParams(o).egressId.U === io.in.bits.egress_id) { route_q.io.enq.bits.vc_sel(o+nOutputs)(0) := true.B } } } assert(!(route_q.io.enq.valid && !route_q.io.enq.ready)) val vcalloc_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2)) val vcalloc_q = Module(new Queue(new VCAllocResp(outParams, egressParams), 1, pipe=true)) vcalloc_buffer.io.enq.bits := route_buffer.io.deq.bits io.vcalloc_req.bits.vc_sel := route_q.io.deq.bits.vc_sel io.vcalloc_req.bits.flow := route_buffer.io.deq.bits.flow io.vcalloc_req.bits.in_vc := 0.U val head = route_buffer.io.deq.bits.head val tail = route_buffer.io.deq.bits.tail vcalloc_buffer.io.enq.valid := (route_buffer.io.deq.valid && (route_q.io.deq.valid || !head) && (io.vcalloc_req.ready || !head) ) io.vcalloc_req.valid := (route_buffer.io.deq.valid && route_q.io.deq.valid && head && vcalloc_buffer.io.enq.ready && vcalloc_q.io.enq.ready) route_buffer.io.deq.ready := (vcalloc_buffer.io.enq.ready && (route_q.io.deq.valid || !head) && (io.vcalloc_req.ready || !head) && (vcalloc_q.io.enq.ready || !head)) 
route_q.io.deq.ready := (route_buffer.io.deq.fire && tail) vcalloc_q.io.enq.valid := io.vcalloc_req.fire vcalloc_q.io.enq.bits := io.vcalloc_resp assert(!(vcalloc_q.io.enq.valid && !vcalloc_q.io.enq.ready)) io.salloc_req(0).bits.vc_sel := vcalloc_q.io.deq.bits.vc_sel io.salloc_req(0).bits.tail := vcalloc_buffer.io.deq.bits.tail val c = (vcalloc_q.io.deq.bits.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U val vcalloc_tail = vcalloc_buffer.io.deq.bits.tail io.salloc_req(0).valid := vcalloc_buffer.io.deq.valid && vcalloc_q.io.deq.valid && c && !io.block vcalloc_buffer.io.deq.ready := io.salloc_req(0).ready && vcalloc_q.io.deq.valid && c && !io.block vcalloc_q.io.deq.ready := vcalloc_tail && vcalloc_buffer.io.deq.fire val out_bundle = if (combineSAST) { Wire(Valid(new SwitchBundle(outParams, egressParams))) } else { Reg(Valid(new SwitchBundle(outParams, egressParams))) } io.out(0) := out_bundle out_bundle.valid := vcalloc_buffer.io.deq.fire out_bundle.bits.flit := vcalloc_buffer.io.deq.bits out_bundle.bits.flit.virt_channel_id := 0.U val out_channel_oh = vcalloc_q.io.deq.bits.vc_sel.map(_.reduce(_||_)).toSeq out_bundle.bits.out_virt_channel := Mux1H(out_channel_oh, vcalloc_q.io.deq.bits.vc_sel.map(v => OHToUInt(v)).toSeq) io.debug.va_stall := io.vcalloc_req.valid && !io.vcalloc_req.ready io.debug.sa_stall := io.salloc_req(0).valid && !io.salloc_req(0).ready // TODO: We should not generate input/ingress/output/egress units for untraversable channels if (!cParam.traversable) { io.in.ready := false.B io.router_req.valid := false.B io.router_req.bits := DontCare io.vcalloc_req.valid := false.B io.vcalloc_req.bits := DontCare io.salloc_req.foreach(_.valid := false.B) io.salloc_req.foreach(_.bits := DontCare) io.out.foreach(_.valid := false.B) io.out.foreach(_.bits := DontCare) } }
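The untraversable-channel tie-off at the end of IngressUnit is likely why the elaborated instance that follows contains nothing but clock and reset: once every interface is driven inactive, constant propagation and dead-code elimination strip the queues and allocation logic, and the unused ports with them. A minimal sketch of that tie-off pattern in isolation (module and port names invented for illustration):

import chisel3._
import chisel3.util._

class TiedOffUnit extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(Decoupled(UInt(8.W)))
    val out = Decoupled(UInt(8.W))
  })
  // Refuse all input and never produce output; constant-driven interfaces like these can be optimized away.
  io.in.ready  := false.B
  io.out.valid := false.B
  io.out.bits  := DontCare
}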
module IngressUnit_7( // @[IngressUnit.scala:11:7]
  input clock, // @[IngressUnit.scala:11:7]
  input reset // @[IngressUnit.scala:11:7]
);
endmodule
Generate the Verilog code corresponding to the following Chisel files. File Crossing.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.interrupts import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg} @deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2") class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val intnode = IntAdapterNode() lazy val module = new Impl class Impl extends LazyModuleImp(this) { (intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) => out := SynchronizerShiftReg(in, sync) } } } object IntSyncCrossingSource { def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) = { val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered)) intsource.node } } class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule { val node = IntSyncSourceNode(alreadyRegistered) lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl) class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := AsyncResetReg(Cat(in.reverse)).asBools } } class ImplRegistered extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := in } } } object IntSyncCrossingSink { @deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. 
Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2") def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(sync) lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := SynchronizerShiftReg(in.sync, sync) } } } object IntSyncAsyncCrossingSink { def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(0) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := in.sync } } } object IntSyncSyncCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncSyncCrossingSink()) intsink.node } } class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(1) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := RegNext(in.sync) } } } object IntSyncRationalCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncRationalCrossingSink()) intsink.node } } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. 
*/ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. 
* * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. 
*/ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. 
* * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. 
*/ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. 
Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
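 *
 * For orientation, a minimal illustrative sketch (not taken from this file) of how the negotiated
 * [[in]]/[[out]] accessors above are then consumed from [[LazyModuleImp]] code once instantiation has
 * begun, mirroring the interrupt crossing sources elsewhere in this codebase:
 * {{{
 * lazy val module = new LazyModuleImp(this) {
 *   (node.in zip node.out).foreach { case ((in, edgeIn), (out, edgeOut)) =>
 *     out := in // bundles and edges here reflect the completed parameter negotiation
 *   }
 * }
 * }}}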
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File AsyncResetReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ /** This black-boxes an Async Reset * (or Set) * Register. * * Because Chisel doesn't support * parameterized black boxes, * we unfortunately have to * instantiate a number of these. * * We also have to hard-code the set/ * reset behavior. * * Do not confuse an asynchronous * reset signal with an asynchronously * reset reg. You should still * properly synchronize your reset * deassertion. * * @param d Data input * @param q Data Output * @param clk Clock Input * @param rst Reset Input * @param en Write Enable Input * */ class AsyncResetReg(resetValue: Int = 0) extends RawModule { val io = IO(new Bundle { val d = Input(Bool()) val q = Output(Bool()) val en = Input(Bool()) val clk = Input(Clock()) val rst = Input(Reset()) }) val reg = withClockAndReset(io.clk, io.rst.asAsyncReset)(RegInit(resetValue.U(1.W))) when (io.en) { reg := io.d } io.q := reg } class SimpleRegIO(val w: Int) extends Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) } class AsyncResetRegVec(val w: Int, val init: BigInt) extends Module { override def desiredName = s"AsyncResetRegVec_w${w}_i${init}" val io = IO(new SimpleRegIO(w)) val reg = withReset(reset.asAsyncReset)(RegInit(init.U(w.W))) when (io.en) { reg := io.d } io.q := reg } object AsyncResetReg { // Create Single Registers def apply(d: Bool, clk: Clock, rst: Bool, init: Boolean, name: Option[String]): Bool = { val reg = Module(new AsyncResetReg(if (init) 1 else 0)) reg.io.d := d reg.io.clk := clk reg.io.rst := rst reg.io.en := true.B name.foreach(reg.suggestName(_)) reg.io.q } def apply(d: Bool, clk: Clock, rst: Bool): Bool = apply(d, clk, rst, false, None) def apply(d: Bool, clk: Clock, rst: Bool, name: String): Bool = apply(d, clk, rst, false, Some(name)) // Create Vectors of Registers def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: Option[String] = None): UInt = { val w = updateData.getWidth max resetData.bitLength val reg = Module(new AsyncResetRegVec(w, resetData)) name.foreach(reg.suggestName(_)) reg.io.d := updateData reg.io.en := enable reg.io.q } def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: String): UInt = apply(updateData, resetData, enable, Some(name)) def 
apply(updateData: UInt, resetData: BigInt): UInt = apply(updateData, resetData, enable = true.B) def apply(updateData: UInt, resetData: BigInt, name: String): UInt = apply(updateData, resetData, enable = true.B, Some(name)) def apply(updateData: UInt, enable: Bool): UInt = apply(updateData, resetData = BigInt(0), enable) def apply(updateData: UInt, enable: Bool, name: String): UInt = apply(updateData, resetData = BigInt(0), enable, Some(name)) def apply(updateData: UInt): UInt = apply(updateData, resetData = BigInt(0), enable = true.B) def apply(updateData: UInt, name: String): UInt = apply(updateData, resetData = BigInt(0), enable = true.B, Some(name)) }
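Before the generated Verilog, a brief illustrative usage sketch (not part of the sources above; the `StatusHolder` module and `status_reg` name are placeholders) showing how the `AsyncResetReg` helper object is typically invoked to build a synchronously written, asynchronously reset register:

import chisel3._
import freechips.rocketchip.util.AsyncResetReg

class StatusHolder extends Module {
  val io = IO(new Bundle {
    val d  = Input(UInt(4.W))
    val en = Input(Bool())
    val q  = Output(UInt(4.W))
  })
  // Vector-register form: the width is taken from the update data / reset value,
  // and the underlying AsyncResetRegVec resets (asynchronously) to zero.
  io.q := AsyncResetReg(io.d, resetData = BigInt(0), enable = io.en, name = "status_reg")
}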
module IntSyncCrossingSource_n1x1_46( // @[Crossing.scala:41:9] input clock, // @[Crossing.scala:41:9] input reset, // @[Crossing.scala:41:9] input auto_in_0, // @[LazyModuleImp.scala:107:25] output auto_out_sync_0 // @[LazyModuleImp.scala:107:25] ); wire auto_in_0_0 = auto_in_0; // @[Crossing.scala:41:9] wire nodeIn_0 = auto_in_0_0; // @[Crossing.scala:41:9] wire nodeOut_sync_0; // @[MixedNode.scala:542:17] wire auto_out_sync_0_0; // @[Crossing.scala:41:9] assign auto_out_sync_0_0 = nodeOut_sync_0; // @[Crossing.scala:41:9] AsyncResetRegVec_w1_i0_46 reg_0 ( // @[AsyncResetReg.scala:86:21] .clock (clock), .reset (reset), .io_d (nodeIn_0), // @[MixedNode.scala:551:17] .io_q (nodeOut_sync_0) ); // @[AsyncResetReg.scala:86:21] assign auto_out_sync_0 = auto_out_sync_0_0; // @[Crossing.scala:41:9] endmodule
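Note that the submodule name above follows `AsyncResetRegVec.desiredName` (`s"AsyncResetRegVec_w${w}_i${init}"`): a width-1 register with reset value 0 holds the single synchronized interrupt bit. The trailing `_46` is presumably just a uniquifying suffix added during elaboration, not something coming from the Chisel sources. An illustrative fragment (inside some module body) showing the parameters implied by that name:

// Elaborates with desiredName "AsyncResetRegVec_w1_i0" (illustrative only).
val syncReg = Module(new AsyncResetRegVec(w = 1, init = 0))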
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
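 * For example (illustrative only, with `scala.language.reflectiveCalls` in scope), this structural
 * type lets a helper accept any bundle carrying a `valid` bit:
 * {{{
 * def isValid(x: DataCanBeValid): Bool = x.valid
 * }}}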
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag }
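As a quick illustration before the generated Verilog below, a minimal sketch (the `Example` module name and port names are placeholders, not taken from these files) of how `OptimizationBarrier` is used: it wraps a value in a dedicated pass-through module, which is exactly the shape of the `OptimizationBarrier_*` module that follows.

import chisel3._
import freechips.rocketchip.util.OptimizationBarrier

class Example extends Module {
  val io = IO(new Bundle {
    val ppn_in  = Input(UInt(20.W))
    val ppn_out = Output(UInt(20.W))
  })
  // The value is routed through a generated OptimizationBarrier_<typeName> instance.
  io.ppn_out := OptimizationBarrier(io.ppn_in)
}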
module OptimizationBarrier_TLBEntryData_53( // @[package.scala:267:30] input clock, // @[package.scala:267:30] input reset, // @[package.scala:267:30] input [19:0] io_x_ppn, // @[package.scala:268:18] input io_x_u, // @[package.scala:268:18] input io_x_g, // @[package.scala:268:18] input io_x_ae_ptw, // @[package.scala:268:18] input io_x_ae_final, // @[package.scala:268:18] input io_x_ae_stage2, // @[package.scala:268:18] input io_x_pf, // @[package.scala:268:18] input io_x_gf, // @[package.scala:268:18] input io_x_sw, // @[package.scala:268:18] input io_x_sx, // @[package.scala:268:18] input io_x_sr, // @[package.scala:268:18] input io_x_hw, // @[package.scala:268:18] input io_x_hx, // @[package.scala:268:18] input io_x_hr, // @[package.scala:268:18] input io_x_pw, // @[package.scala:268:18] input io_x_px, // @[package.scala:268:18] input io_x_pr, // @[package.scala:268:18] input io_x_ppp, // @[package.scala:268:18] input io_x_pal, // @[package.scala:268:18] input io_x_paa, // @[package.scala:268:18] input io_x_eff, // @[package.scala:268:18] input io_x_c, // @[package.scala:268:18] input io_x_fragmented_superpage, // @[package.scala:268:18] output [19:0] io_y_ppn, // @[package.scala:268:18] output io_y_u, // @[package.scala:268:18] output io_y_ae_ptw, // @[package.scala:268:18] output io_y_ae_final, // @[package.scala:268:18] output io_y_ae_stage2, // @[package.scala:268:18] output io_y_pf, // @[package.scala:268:18] output io_y_gf, // @[package.scala:268:18] output io_y_sw, // @[package.scala:268:18] output io_y_sx, // @[package.scala:268:18] output io_y_sr, // @[package.scala:268:18] output io_y_hw, // @[package.scala:268:18] output io_y_hx, // @[package.scala:268:18] output io_y_hr, // @[package.scala:268:18] output io_y_pw, // @[package.scala:268:18] output io_y_px, // @[package.scala:268:18] output io_y_pr, // @[package.scala:268:18] output io_y_ppp, // @[package.scala:268:18] output io_y_pal, // @[package.scala:268:18] output io_y_paa, // @[package.scala:268:18] output io_y_eff, // @[package.scala:268:18] output io_y_c // @[package.scala:268:18] ); wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30] wire io_x_u_0 = io_x_u; // @[package.scala:267:30] wire io_x_g_0 = io_x_g; // @[package.scala:267:30] wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30] wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30] wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30] wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30] wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30] wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30] wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30] wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30] wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30] wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30] wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30] wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30] wire io_x_px_0 = io_x_px; // @[package.scala:267:30] wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30] wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30] wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30] wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30] wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30] wire io_x_c_0 = io_x_c; // @[package.scala:267:30] wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30] wire [19:0] io_y_ppn_0 = io_x_ppn_0; // @[package.scala:267:30] wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30] wire io_y_g = io_x_g_0; // 
@[package.scala:267:30] wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // @[package.scala:267:30] wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30] wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30] wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30] wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30] wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30] wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30] wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30] wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30] wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30] wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30] wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30] wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30] wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30] wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30] wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30] wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30] wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30] wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30] wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30] assign io_y_ppn = io_y_ppn_0; // @[package.scala:267:30] assign io_y_u = io_y_u_0; // @[package.scala:267:30] assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30] assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30] assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30] assign io_y_pf = io_y_pf_0; // @[package.scala:267:30] assign io_y_gf = io_y_gf_0; // @[package.scala:267:30] assign io_y_sw = io_y_sw_0; // @[package.scala:267:30] assign io_y_sx = io_y_sx_0; // @[package.scala:267:30] assign io_y_sr = io_y_sr_0; // @[package.scala:267:30] assign io_y_hw = io_y_hw_0; // @[package.scala:267:30] assign io_y_hx = io_y_hx_0; // @[package.scala:267:30] assign io_y_hr = io_y_hr_0; // @[package.scala:267:30] assign io_y_pw = io_y_pw_0; // @[package.scala:267:30] assign io_y_px = io_y_px_0; // @[package.scala:267:30] assign io_y_pr = io_y_pr_0; // @[package.scala:267:30] assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30] assign io_y_pal = io_y_pal_0; // @[package.scala:267:30] assign io_y_paa = io_y_paa_0; // @[package.scala:267:30] assign io_y_eff = io_y_eff_0; // @[package.scala:267:30] assign io_y_c = io_y_c_0; // @[package.scala:267:30] endmodule
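One more small illustration of the utilities in package.scala above (plain Scala, no hardware involved, names are placeholders): `groupByIntoSeq` behaves like `Seq.groupBy` but returns its groups in first-appearance order, which keeps generated code deterministic.

import freechips.rocketchip.util.groupByIntoSeq

object GroupByDemo extends App {
  // Keys appear in the order they are first seen; values keep their original order.
  val grouped = groupByIntoSeq(Seq("add", "sub", "and", "or"))(_.length)
  println(grouped) // List((3,List(add, sub, and)), (2,List(or)))
}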
Generate the Verilog code corresponding to the following Chisel files. File Crossing.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.interrupts import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg} @deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2") class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val intnode = IntAdapterNode() lazy val module = new Impl class Impl extends LazyModuleImp(this) { (intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) => out := SynchronizerShiftReg(in, sync) } } } object IntSyncCrossingSource { def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) = { val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered)) intsource.node } } class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule { val node = IntSyncSourceNode(alreadyRegistered) lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl) class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := AsyncResetReg(Cat(in.reverse)).asBools } } class ImplRegistered extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := in } } } object IntSyncCrossingSink { @deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. 
Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2") def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(sync) lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := SynchronizerShiftReg(in.sync, sync) } } } object IntSyncAsyncCrossingSink { def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(0) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := in.sync } } } object IntSyncSyncCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncSyncCrossingSink()) intsink.node } } class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(1) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := RegNext(in.sync) } } } object IntSyncRationalCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncRationalCrossingSink()) intsink.node } } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. 
*/ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. 
* * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. 
*/ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. 
* * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. 
*/ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. 
Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } }
module IntSyncSyncCrossingSink_n1x2_11( // @[Crossing.scala:96:9] input auto_in_sync_0, // @[LazyModuleImp.scala:107:25] input auto_in_sync_1, // @[LazyModuleImp.scala:107:25] output auto_out_0, // @[LazyModuleImp.scala:107:25] output auto_out_1 // @[LazyModuleImp.scala:107:25] ); wire auto_in_sync_0_0 = auto_in_sync_0; // @[Crossing.scala:96:9] wire auto_in_sync_1_0 = auto_in_sync_1; // @[Crossing.scala:96:9] wire childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire nodeIn_sync_0 = auto_in_sync_0_0; // @[Crossing.scala:96:9] wire nodeIn_sync_1 = auto_in_sync_1_0; // @[Crossing.scala:96:9] wire nodeOut_0; // @[MixedNode.scala:542:17] wire nodeOut_1; // @[MixedNode.scala:542:17] wire auto_out_0_0; // @[Crossing.scala:96:9] wire auto_out_1_0; // @[Crossing.scala:96:9] assign nodeOut_0 = nodeIn_sync_0; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_1 = nodeIn_sync_1; // @[MixedNode.scala:542:17, :551:17] assign auto_out_0_0 = nodeOut_0; // @[Crossing.scala:96:9] assign auto_out_1_0 = nodeOut_1; // @[Crossing.scala:96:9] assign auto_out_0 = auto_out_0_0; // @[Crossing.scala:96:9] assign auto_out_1 = auto_out_1_0; // @[Crossing.scala:96:9] endmodule
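The `resolveStar` contract and the binding-cardinality bookkeeping documented in the MixedNode excerpt above become clearer with the arithmetic pulled out on its own. The following is a standalone Scala sketch, not part of the diplomacy sources: the object name, the one-to-one resolution policy, and the example counts are assumptions chosen only to illustrate how the known bindings on one side determine how many edges a star binding resolves to, and how per-binding edge counts become the (start, end) ranges of oPortMapping/iPortMapping.

// Standalone sketch of a simple star-resolution policy, assuming a node where
// every inward edge corresponds to exactly one outward edge (the names and the
// 1:1 policy are illustrative, not taken from rocket-chip).
object StarResolutionSketch {

  /** Returns (iStar, oStar): the number of edges each star binding resolves to. */
  def resolveStarOneToOne(iKnown: Int, oKnown: Int, iStars: Int, oStars: Int): (Int, Int) = {
    require(iStars + oStars <= 1, "a 1:1 node can only absorb a star on one side")
    if (iStars == 1) ((oKnown - iKnown) max 0, 0)      // inputs grow to match known outputs
    else if (oStars == 1) (0, (iKnown - oKnown) max 0) // outputs grow to match known inputs
    else (0, 0)
  }

  /** Turns per-binding edge counts into the (start, end) ranges used for port mapping. */
  def portRanges(edgesPerBinding: Seq[Int]): Seq[(Int, Int)] = {
    val sums = edgesPerBinding.scanLeft(0)(_ + _) // running sum, mirroring oSum/iSum above
    sums.init.zip(sums.tail)
  }

  def main(args: Array[String]): Unit = {
    // Three known (BIND_ONCE) inward bindings and one `:=*` outward binding:
    // the outward star resolves to 3 edges.
    println(resolveStarOneToOne(iKnown = 3, oKnown = 0, iStars = 0, oStars = 1)) // (0,3)
    // Bindings contributing 1, 3 and 2 edges own these half-open edge-index ranges.
    println(portRanges(Seq(1, 3, 2))) // Vector((0,1), (1,4), (4,6))
  }
}

The `scanLeft`-then-`init.zip(tail)` construction is the same shape used for `oPortMapping` and `iPortMapping` in the excerpt: each binding operator contributes a count of resolved edges, and the running sum marks out the contiguous slice of edge indices that operator owns.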
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_310( // @[SynchronizerReg.scala:68:19]
  input  clock, // @[SynchronizerReg.scala:68:19]
  input  reset, // @[SynchronizerReg.scala:68:19]
  input  io_d, // @[ShiftReg.scala:36:14]
  output io_q // @[ShiftReg.scala:36:14]
);

  wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
  wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
  wire io_q_0; // @[SynchronizerReg.scala:68:19]
  reg  sync_0; // @[SynchronizerReg.scala:51:87]
  assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
  reg  sync_1; // @[SynchronizerReg.scala:51:87]
  reg  sync_2; // @[SynchronizerReg.scala:51:87]
  always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
    if (reset) begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
    end
    else begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
      sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
      sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
    end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule
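The primitive above is what `AsyncResetSynchronizerShiftReg` instantiates once per input bit. As a point of reference, here is a minimal usage sketch; the wrapper module and its signal names are hypothetical, while the `apply` signature is the one defined in the `AsyncResetSynchronizerShiftReg` companion object in the sources above.

// Hypothetical wrapper bringing one asynchronous bit into the local clock
// domain through the 3-deep, async-reset synchronizer defined above.
import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class IrqSynchronizer extends Module {
  val io = IO(new Bundle {
    val async_irq = Input(Bool())  // driven from another clock domain
    val sync_irq  = Output(Bool()) // safe to sample in this module's clock domain
  })
  // sync = 3 stages, init = 0, with a suggested instance name
  io.sync_irq := AsyncResetSynchronizerShiftReg(io.async_irq, sync = 3, init = 0, name = Some("irq_sync"))
}

Because the input is a single bit, elaborating a wrapper like this should yield exactly one `AsyncResetSynchronizerPrimitiveShiftReg_d3_i0` instance of the kind shown above, wrapped in a width-1 `AsyncResetSynchronizerShiftReg_w1_d3_i0` module.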
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
module AsyncValidSync_121( // @[AsyncQueue.scala:58:7] input io_in, // @[AsyncQueue.scala:59:14] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_142 io_out_source_valid_1 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (io_in_0), // @[AsyncQueue.scala:58:7] .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
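`AsyncValidSync` above is one of the handshake synchronizers that `AsyncQueueSource` and `AsyncQueueSink` instantiate for the `safe` reset protocol. For context, here is a hedged usage sketch of the top-level `AsyncQueue` from the sources above. The wrapper is hypothetical, and it assumes that `CrossingIO` (defined in Crossing.scala, which is not shown here) exposes the `enq_clock`/`enq_reset`/`deq_clock`/`deq_reset`/`enq`/`deq` fields that the `AsyncQueue` body references, and that its reset fields can be driven by a plain Bool.

// Hypothetical top level crossing a Decoupled[UInt] between two clock domains
// with the AsyncQueue defined above (8 entries, 3-flop synchronizers, safe resets).
import chisel3._
import chisel3.util._
import freechips.rocketchip.util.{AsyncQueue, AsyncQueueParams}

class ByteStreamCrossing extends RawModule {
  val enq_clock = IO(Input(Clock()))
  val enq_reset = IO(Input(Bool()))
  val deq_clock = IO(Input(Clock()))
  val deq_reset = IO(Input(Bool()))
  val enq       = IO(Flipped(Decoupled(UInt(8.W))))
  val deq       = IO(Decoupled(UInt(8.W)))

  // Give the RawModule an implicit clock/reset for instantiation; AsyncQueue
  // re-clocks its source and sink halves from io.enq_clock / io.deq_clock anyway.
  val q = withClockAndReset(deq_clock, deq_reset) {
    Module(new AsyncQueue(UInt(8.W), AsyncQueueParams(depth = 8, sync = 3, safe = true)))
  }
  q.io.enq_clock := enq_clock
  q.io.enq_reset := enq_reset
  q.io.deq_clock := deq_clock
  q.io.deq_reset := deq_reset
  q.io.enq <> enq // producer side, synchronous to enq_clock
  deq <> q.io.deq // consumer side, synchronous to deq_clock
}

With `safe = true`, the queue also carries the `ridx_valid`/`widx_valid`/`*_reset_n` handshake internally, which is what makes it possible to reset one side of the crossing (but not the other) while the queue is empty, as noted in the AsyncQueueParams comment above.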
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed version of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal :=
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message " + "is already pending (not received until current cycle) for a prior request message " + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the " + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the " + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be the same as a_size of the corresponding request " + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
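// Illustrative walkthrough of the packing scheme above (assumed numbers, not from the source):
// each outstanding source ID stores (opcode << 1) | 1 and (size << 1) | 1 into
// inflight_opcodes / inflight_sizes at bit offset (source << log_a_*_bus_size); the low
// tag bit distinguishes an occupied slot from the all-zero "nothing in flight" encoding.
// For example, with edge.bundle.sizeBits = 3 (so a_size_bus_size = 4 and
// log_a_size_bus_size = 2), a request from source 2 with a_size = 6 writes
// (6 << 1) | 1 = 0xd at bit offset 2 << 2 = 8, and a_size_lookup later recovers the size as
// ((inflight_sizes >> 8) & 0xf) >> 1 = 6 when the matching 'D' response is checked.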
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
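// Illustrative note (not from the source): the fold below builds a log-depth barrel
// rotator; stage i conditionally rotates the running value by 2^i bits when amt(i)
// is set. E.g. for an 8-bit value and n = 5 (0b101), the 1-bit and 4-bit stages
// apply and the 2-bit stage is skipped, giving a total right-rotation of 5.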
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, 
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
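// (Illustrative aside, not from the source: for a manager port that only supports Get,
// e.g. a ROM, aDataYes below is false, so staticHasData returns Some(false) and
// hasData for 'A' beats constant-folds to false.B instead of decoding the opcode.)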
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
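// Populate the B-channel LogicalData request (the inward mirror of TLEdgeOut.Logical);
// note the byte mask is regenerated from (fromAddress, lgSize) rather than supplied by the caller.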
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
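Before the generated monitor below, a minimal sketch of how the TLEdgeOut constructors above are typically driven from a client implementation. This is illustrative only and not part of the files above; `node`, `wantRead`, `src` and `addr` are assumed names.

// Illustrative sketch (assumed context: inside a LazyModuleImp of a TL client,
// with `val (out, edge) = node.out(0)` giving the port bundle and its TLEdgeOut).
val src  = 0.U                 // source ID owned by this client (assumed)
val addr = 0x80000000L.U       // example address (assumed)
val (legal, get) = edge.Get(fromSource = src, toAddress = addr, lgSize = 3.U) // 8-byte read
out.a.valid := wantRead && legal   // `wantRead` is an assumed request signal
out.a.bits  := get
out.d.ready := true.B              // always accept the AccessAck(Data) response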
module TLMonitor_42( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [15:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [127:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_b_ready, // @[Monitor.scala:20:14] input io_in_b_valid, // @[Monitor.scala:20:14] input [2:0] io_in_b_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_b_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_b_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_b_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_b_bits_address, // @[Monitor.scala:20:14] input [15:0] io_in_b_bits_mask, // @[Monitor.scala:20:14] input [127:0] io_in_b_bits_data, // @[Monitor.scala:20:14] input io_in_b_bits_corrupt, // @[Monitor.scala:20:14] input io_in_c_ready, // @[Monitor.scala:20:14] input io_in_c_valid, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_c_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_c_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_c_bits_address, // @[Monitor.scala:20:14] input [127:0] io_in_c_bits_data, // @[Monitor.scala:20:14] input io_in_c_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [127:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt, // @[Monitor.scala:20:14] input io_in_e_ready, // @[Monitor.scala:20:14] input io_in_e_valid, // @[Monitor.scala:20:14] input [3:0] io_in_e_bits_sink // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [3:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [15:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [127:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_b_ready_0 = io_in_b_ready; // @[Monitor.scala:36:7] wire io_in_b_valid_0 = io_in_b_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_b_bits_opcode_0 = io_in_b_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] 
io_in_b_bits_param_0 = io_in_b_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_b_bits_size_0 = io_in_b_bits_size; // @[Monitor.scala:36:7] wire [3:0] io_in_b_bits_source_0 = io_in_b_bits_source; // @[Monitor.scala:36:7] wire [31:0] io_in_b_bits_address_0 = io_in_b_bits_address; // @[Monitor.scala:36:7] wire [15:0] io_in_b_bits_mask_0 = io_in_b_bits_mask; // @[Monitor.scala:36:7] wire [127:0] io_in_b_bits_data_0 = io_in_b_bits_data; // @[Monitor.scala:36:7] wire io_in_b_bits_corrupt_0 = io_in_b_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_c_ready_0 = io_in_c_ready; // @[Monitor.scala:36:7] wire io_in_c_valid_0 = io_in_c_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_c_bits_opcode_0 = io_in_c_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_c_bits_param_0 = io_in_c_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_c_bits_size_0 = io_in_c_bits_size; // @[Monitor.scala:36:7] wire [3:0] io_in_c_bits_source_0 = io_in_c_bits_source; // @[Monitor.scala:36:7] wire [31:0] io_in_c_bits_address_0 = io_in_c_bits_address; // @[Monitor.scala:36:7] wire [127:0] io_in_c_bits_data_0 = io_in_c_bits_data; // @[Monitor.scala:36:7] wire io_in_c_bits_corrupt_0 = io_in_c_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [3:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [3:0] io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [127:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_e_ready_0 = io_in_e_ready; // @[Monitor.scala:36:7] wire io_in_e_valid_0 = io_in_e_valid; // @[Monitor.scala:36:7] wire [3:0] io_in_e_bits_sink_0 = io_in_e_bits_sink; // @[Monitor.scala:36:7] wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57] wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57] wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] 
_a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [7:0] b_first_beats1 = 8'h0; // @[Edges.scala:221:14] wire [7:0] b_first_count = 8'h0; // @[Edges.scala:234:25] wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:56:32] wire sink_ok = 1'h1; // @[Monitor.scala:309:31] wire _legal_source_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_20 = 1'h1; // @[Parameters.scala:56:32] wire sink_ok_1 = 1'h1; // @[Monitor.scala:367:31] wire _b_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire b_first_last = 1'h1; // @[Edges.scala:232:33] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117] wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48] wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119] wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48] wire _legal_source_T_8 = 1'h0; // @[Mux.scala:30:73] wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [3:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _mask_sizeOH_T_3 = io_in_b_bits_size_0; // @[Misc.scala:202:34] 
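// The following block of wires evaluates the monitor's A-channel legality terms:
//   source_ok  <-> edge.client.contains(bundle.source)  (Parameters.scala:54-57)
//   is_aligned <-> edge.isAligned(address, size)         (Edges.scala:21)
//   mask       <-> edge.full_mask(bundle): the 16-bit byte enable for a 128-bit data beat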
wire [3:0] _uncommonBits_T_11 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _legal_source_uncommonBits_T = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_12 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T = io_in_b_bits_address_0; // @[Monitor.scala:36:7] wire [3:0] _source_ok_uncommonBits_T_2 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_13 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_14 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_15 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_16 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [3:0] _uncommonBits_T_17 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_70 = io_in_c_bits_address_0; // @[Monitor.scala:36:7] wire [3:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [2:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[2:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T = io_in_a_bits_source_0[3]; // @[Monitor.scala:36:7] wire _source_ok_T_1 = ~_source_ok_T; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_3 = _source_ok_T_1; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_4 = source_ok_uncommonBits < 3'h5; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_5 = _source_ok_T_3 & _source_ok_T_4; // @[Parameters.scala:54:67, :56:48, :57:20] wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31] wire _source_ok_T_6 = io_in_a_bits_source_0 == 4'h5; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31] wire _source_ok_T_7 = io_in_a_bits_source_0 == 4'h8; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2 = _source_ok_T_7; // @[Parameters.scala:1138:31] wire _source_ok_T_8 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_8 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71] wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [31:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [3:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1; // @[OneHot.scala:65:{12,27}] wire [3:0] mask_sizeOH = {_mask_sizeOH_T_2[3:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_sub_0_1 = |(io_in_a_bits_size_0[3:2]); // @[Misc.scala:206:21] wire mask_sub_sub_sub_size = mask_sizeOH[3]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_sub_bit = io_in_a_bits_address_0[3]; // @[Misc.scala:210:26] wire mask_sub_sub_sub_1_2 = mask_sub_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire 
mask_sub_sub_sub_nbit = ~mask_sub_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_sub_0_2 = mask_sub_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_sub_acc_T = mask_sub_sub_sub_size & mask_sub_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_sub_0_1 = mask_sub_sub_sub_sub_0_1 | _mask_sub_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_sub_acc_T_1 = mask_sub_sub_sub_size & mask_sub_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_sub_1_1 = mask_sub_sub_sub_sub_0_1 | _mask_sub_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_sub_0_2 & mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_sub_1_2 = mask_sub_sub_sub_0_2 & mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_sub_2_2 = mask_sub_sub_sub_1_2 & mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T_2 = mask_sub_sub_size & mask_sub_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_2_1 = mask_sub_sub_sub_1_1 | _mask_sub_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_sub_3_2 = mask_sub_sub_sub_1_2 & mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_sub_acc_T_3 = mask_sub_sub_size & mask_sub_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_3_1 = mask_sub_sub_sub_1_1 | _mask_sub_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_sub_4_2 = mask_sub_sub_2_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_4 = mask_sub_size & mask_sub_4_2; // @[Misc.scala:209:26, :214:27, 
:215:38] wire mask_sub_4_1 = mask_sub_sub_2_1 | _mask_sub_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_sub_5_2 = mask_sub_sub_2_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_5 = mask_sub_size & mask_sub_5_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_5_1 = mask_sub_sub_2_1 | _mask_sub_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_sub_6_2 = mask_sub_sub_3_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_6 = mask_sub_size & mask_sub_6_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_6_1 = mask_sub_sub_3_1 | _mask_sub_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_sub_7_2 = mask_sub_sub_3_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_7 = mask_sub_size & mask_sub_7_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_7_1 = mask_sub_sub_3_1 | _mask_sub_acc_T_7; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire mask_eq_8 = mask_sub_4_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_8 = mask_size & mask_eq_8; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_8 = mask_sub_4_1 | _mask_acc_T_8; // @[Misc.scala:215:{29,38}] wire mask_eq_9 = mask_sub_4_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_9 = mask_size & mask_eq_9; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_9 = mask_sub_4_1 | _mask_acc_T_9; // @[Misc.scala:215:{29,38}] wire mask_eq_10 = mask_sub_5_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] 
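// Remaining leaves of the mask-generation (MaskGen) tree: mask_acc_10..15 below are the
// per-byte enables for lanes 10..15, then concatenated into the 16-bit mask = {mask_hi, mask_lo}.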
wire _mask_acc_T_10 = mask_size & mask_eq_10; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_10 = mask_sub_5_1 | _mask_acc_T_10; // @[Misc.scala:215:{29,38}] wire mask_eq_11 = mask_sub_5_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_11 = mask_size & mask_eq_11; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_11 = mask_sub_5_1 | _mask_acc_T_11; // @[Misc.scala:215:{29,38}] wire mask_eq_12 = mask_sub_6_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_12 = mask_size & mask_eq_12; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_12 = mask_sub_6_1 | _mask_acc_T_12; // @[Misc.scala:215:{29,38}] wire mask_eq_13 = mask_sub_6_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_13 = mask_size & mask_eq_13; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_13 = mask_sub_6_1 | _mask_acc_T_13; // @[Misc.scala:215:{29,38}] wire mask_eq_14 = mask_sub_7_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_14 = mask_size & mask_eq_14; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_14 = mask_sub_7_1 | _mask_acc_T_14; // @[Misc.scala:215:{29,38}] wire mask_eq_15 = mask_sub_7_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_15 = mask_size & mask_eq_15; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_15 = mask_sub_7_1 | _mask_acc_T_15; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo_lo = {mask_lo_lo_hi, mask_lo_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_lo_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo_hi = {mask_lo_hi_hi, mask_lo_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo_lo = {mask_acc_9, mask_acc_8}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_lo_hi = {mask_acc_11, mask_acc_10}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi_lo = {mask_hi_lo_hi, mask_hi_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_hi_lo = {mask_acc_13, mask_acc_12}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi_hi = {mask_acc_15, mask_acc_14}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi_hi = {mask_hi_hi_hi, mask_hi_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [15:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [2:0] uncommonBits = _uncommonBits_T[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_1 = _uncommonBits_T_1[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_2 = _uncommonBits_T_2[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_3 = _uncommonBits_T_3[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_4 = _uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_5 = _uncommonBits_T_5[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_6 = _uncommonBits_T_6[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_7 = _uncommonBits_T_7[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_8 = _uncommonBits_T_8[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_9 = _uncommonBits_T_9[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_10 = _uncommonBits_T_10[2:0]; // @[Parameters.scala:52:{29,56}] wire 
[2:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[2:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_9 = io_in_d_bits_source_0[3]; // @[Monitor.scala:36:7] wire _source_ok_T_10 = ~_source_ok_T_9; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_13 = source_ok_uncommonBits_1 < 3'h5; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_14 = _source_ok_T_12 & _source_ok_T_13; // @[Parameters.scala:54:67, :56:48, :57:20] wire _source_ok_WIRE_1_0 = _source_ok_T_14; // @[Parameters.scala:1138:31] wire _source_ok_T_15 = io_in_d_bits_source_0 == 4'h5; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_1 = _source_ok_T_15; // @[Parameters.scala:1138:31] wire _source_ok_T_16 = io_in_d_bits_source_0 == 4'h8; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_2 = _source_ok_T_16; // @[Parameters.scala:1138:31] wire _source_ok_T_17 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_17 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire [2:0] uncommonBits_11 = _uncommonBits_T_11[2:0]; // @[Parameters.scala:52:{29,56}] wire _legal_source_T = io_in_b_bits_source_0[3]; // @[Monitor.scala:36:7] wire _legal_source_T_6 = io_in_b_bits_source_0 == 4'h5; // @[Monitor.scala:36:7] wire _legal_source_T_7 = io_in_b_bits_source_0 == 4'h8; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_1 = {1'h0, _address_ok_T}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_2 = _address_ok_T_1 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_3 = _address_ok_T_2; // @[Parameters.scala:137:46] wire _address_ok_T_4 = _address_ok_T_3 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_0 = _address_ok_T_4; // @[Parameters.scala:612:40] wire [31:0] _address_ok_T_5 = {io_in_b_bits_address_0[31:13], io_in_b_bits_address_0[12:0] ^ 13'h1000}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_6 = {1'h0, _address_ok_T_5}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_7 = _address_ok_T_6 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_8 = _address_ok_T_7; // @[Parameters.scala:137:46] wire _address_ok_T_9 = _address_ok_T_8 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1 = _address_ok_T_9; // @[Parameters.scala:612:40] wire [13:0] _GEN_0 = io_in_b_bits_address_0[13:0] ^ 14'h3000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_10 = {io_in_b_bits_address_0[31:14], _GEN_0}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_11 = {1'h0, _address_ok_T_10}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_12 = _address_ok_T_11 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_13 = _address_ok_T_12; // @[Parameters.scala:137:46] wire _address_ok_T_14 = _address_ok_T_13 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_2 = _address_ok_T_14; // @[Parameters.scala:612:40] wire [16:0] _GEN_1 = io_in_b_bits_address_0[16:0] ^ 17'h10000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_15 = {io_in_b_bits_address_0[31:17], _GEN_1}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_16 = {1'h0, _address_ok_T_15}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_17 = _address_ok_T_16 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_18 = _address_ok_T_17; // @[Parameters.scala:137:46] wire _address_ok_T_19 = _address_ok_T_18 == 33'h0; // 
@[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_3 = _address_ok_T_19; // @[Parameters.scala:612:40] wire [20:0] _GEN_2 = io_in_b_bits_address_0[20:0] ^ 21'h100000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_20 = {io_in_b_bits_address_0[31:21], _GEN_2}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_21 = {1'h0, _address_ok_T_20}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_22 = _address_ok_T_21 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_23 = _address_ok_T_22; // @[Parameters.scala:137:46] wire _address_ok_T_24 = _address_ok_T_23 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_4 = _address_ok_T_24; // @[Parameters.scala:612:40] wire [31:0] _address_ok_T_25 = {io_in_b_bits_address_0[31:21], io_in_b_bits_address_0[20:0] ^ 21'h110000}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_26 = {1'h0, _address_ok_T_25}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_27 = _address_ok_T_26 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_28 = _address_ok_T_27; // @[Parameters.scala:137:46] wire _address_ok_T_29 = _address_ok_T_28 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_5 = _address_ok_T_29; // @[Parameters.scala:612:40] wire [25:0] _GEN_3 = io_in_b_bits_address_0[25:0] ^ 26'h2000000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_30 = {io_in_b_bits_address_0[31:26], _GEN_3}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_31 = {1'h0, _address_ok_T_30}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_32 = _address_ok_T_31 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_33 = _address_ok_T_32; // @[Parameters.scala:137:46] wire _address_ok_T_34 = _address_ok_T_33 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_6 = _address_ok_T_34; // @[Parameters.scala:612:40] wire [25:0] _GEN_4 = io_in_b_bits_address_0[25:0] ^ 26'h2010000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_35 = {io_in_b_bits_address_0[31:26], _GEN_4}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_36 = {1'h0, _address_ok_T_35}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_37 = _address_ok_T_36 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_38 = _address_ok_T_37; // @[Parameters.scala:137:46] wire _address_ok_T_39 = _address_ok_T_38 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_7 = _address_ok_T_39; // @[Parameters.scala:612:40] wire [27:0] _GEN_5 = io_in_b_bits_address_0[27:0] ^ 28'h8000000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_40 = {io_in_b_bits_address_0[31:28], _GEN_5}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_41 = {1'h0, _address_ok_T_40}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_42 = _address_ok_T_41 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_43 = _address_ok_T_42; // @[Parameters.scala:137:46] wire _address_ok_T_44 = _address_ok_T_43 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_8 = _address_ok_T_44; // @[Parameters.scala:612:40] wire [27:0] _GEN_6 = io_in_b_bits_address_0[27:0] ^ 28'hC000000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_45 = {io_in_b_bits_address_0[31:28], _GEN_6}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_46 = {1'h0, _address_ok_T_45}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_47 = _address_ok_T_46 & 33'h1FC000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_48 = 
_address_ok_T_47; // @[Parameters.scala:137:46] wire _address_ok_T_49 = _address_ok_T_48 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_9 = _address_ok_T_49; // @[Parameters.scala:612:40] wire [28:0] _GEN_7 = io_in_b_bits_address_0[28:0] ^ 29'h10020000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_50 = {io_in_b_bits_address_0[31:29], _GEN_7}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_51 = {1'h0, _address_ok_T_50}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_52 = _address_ok_T_51 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_53 = _address_ok_T_52; // @[Parameters.scala:137:46] wire _address_ok_T_54 = _address_ok_T_53 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_10 = _address_ok_T_54; // @[Parameters.scala:612:40] wire [31:0] _address_ok_T_55 = io_in_b_bits_address_0 ^ 32'h80000000; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_56 = {1'h0, _address_ok_T_55}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_57 = _address_ok_T_56 & 33'h1F0000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_58 = _address_ok_T_57; // @[Parameters.scala:137:46] wire _address_ok_T_59 = _address_ok_T_58 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_11 = _address_ok_T_59; // @[Parameters.scala:612:40] wire _address_ok_T_60 = _address_ok_WIRE_0 | _address_ok_WIRE_1; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_61 = _address_ok_T_60 | _address_ok_WIRE_2; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_62 = _address_ok_T_61 | _address_ok_WIRE_3; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_63 = _address_ok_T_62 | _address_ok_WIRE_4; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_64 = _address_ok_T_63 | _address_ok_WIRE_5; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_65 = _address_ok_T_64 | _address_ok_WIRE_6; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_66 = _address_ok_T_65 | _address_ok_WIRE_7; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_67 = _address_ok_T_66 | _address_ok_WIRE_8; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_68 = _address_ok_T_67 | _address_ok_WIRE_9; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_69 = _address_ok_T_68 | _address_ok_WIRE_10; // @[Parameters.scala:612:40, :636:64] wire address_ok = _address_ok_T_69 | _address_ok_WIRE_11; // @[Parameters.scala:612:40, :636:64] wire [26:0] _GEN_8 = 27'hFFF << io_in_b_bits_size_0; // @[package.scala:243:71] wire [26:0] _is_aligned_mask_T_2; // @[package.scala:243:71] assign _is_aligned_mask_T_2 = _GEN_8; // @[package.scala:243:71] wire [26:0] _b_first_beats1_decode_T; // @[package.scala:243:71] assign _b_first_beats1_decode_T = _GEN_8; // @[package.scala:243:71] wire [11:0] _is_aligned_mask_T_3 = _is_aligned_mask_T_2[11:0]; // @[package.scala:243:{71,76}] wire [11:0] is_aligned_mask_1 = ~_is_aligned_mask_T_3; // @[package.scala:243:{46,76}] wire [31:0] _is_aligned_T_1 = {20'h0, io_in_b_bits_address_0[11:0] & is_aligned_mask_1}; // @[package.scala:243:46] wire is_aligned_1 = _is_aligned_T_1 == 32'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount_1 = _mask_sizeOH_T_3[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_4 = 4'h1 << mask_sizeOH_shiftAmount_1; // @[OneHot.scala:64:49, :65:12] wire [3:0] _mask_sizeOH_T_5 = _mask_sizeOH_T_4; // @[OneHot.scala:65:{12,27}] wire [3:0] mask_sizeOH_1 = {_mask_sizeOH_T_5[3:1], 1'h1}; // @[OneHot.scala:65:27] wire 
mask_sub_sub_sub_sub_0_1_1 = |(io_in_b_bits_size_0[3:2]); // @[Misc.scala:206:21] wire mask_sub_sub_sub_size_1 = mask_sizeOH_1[3]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_sub_bit_1 = io_in_b_bits_address_0[3]; // @[Misc.scala:210:26] wire mask_sub_sub_sub_1_2_1 = mask_sub_sub_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_sub_nbit_1 = ~mask_sub_sub_sub_bit_1; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_sub_0_2_1 = mask_sub_sub_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_sub_acc_T_2 = mask_sub_sub_sub_size_1 & mask_sub_sub_sub_0_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_sub_0_1_1 = mask_sub_sub_sub_sub_0_1_1 | _mask_sub_sub_sub_acc_T_2; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_sub_acc_T_3 = mask_sub_sub_sub_size_1 & mask_sub_sub_sub_1_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_sub_1_1_1 = mask_sub_sub_sub_sub_0_1_1 | _mask_sub_sub_sub_acc_T_3; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_sub_size_1 = mask_sizeOH_1[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit_1 = io_in_b_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_nbit_1 = ~mask_sub_sub_bit_1; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2_1 = mask_sub_sub_sub_0_2_1 & mask_sub_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T_4 = mask_sub_sub_size_1 & mask_sub_sub_0_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1_1 = mask_sub_sub_sub_0_1_1 | _mask_sub_sub_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_sub_sub_1_2_1 = mask_sub_sub_sub_0_2_1 & mask_sub_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_sub_sub_acc_T_5 = mask_sub_sub_size_1 & mask_sub_sub_1_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1_1 = mask_sub_sub_sub_0_1_1 | _mask_sub_sub_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_sub_sub_2_2_1 = mask_sub_sub_sub_1_2_1 & mask_sub_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T_6 = mask_sub_sub_size_1 & mask_sub_sub_2_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_2_1_1 = mask_sub_sub_sub_1_1_1 | _mask_sub_sub_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_sub_sub_3_2_1 = mask_sub_sub_sub_1_2_1 & mask_sub_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_sub_sub_acc_T_7 = mask_sub_sub_size_1 & mask_sub_sub_3_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_3_1_1 = mask_sub_sub_sub_1_1_1 | _mask_sub_sub_acc_T_7; // @[Misc.scala:215:{29,38}] wire mask_sub_size_1 = mask_sizeOH_1[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit_1 = io_in_b_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit_1 = ~mask_sub_bit_1; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2_1 = mask_sub_sub_0_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_8 = mask_sub_size_1 & mask_sub_0_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1_1 = mask_sub_sub_0_1_1 | _mask_sub_acc_T_8; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2_1 = mask_sub_sub_0_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_9 = mask_sub_size_1 & mask_sub_1_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1_1 = mask_sub_sub_0_1_1 | _mask_sub_acc_T_9; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2_1 = mask_sub_sub_1_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_10 = mask_sub_size_1 & mask_sub_2_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1_1 = mask_sub_sub_1_1_1 | 
_mask_sub_acc_T_10; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2_1 = mask_sub_sub_1_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_11 = mask_sub_size_1 & mask_sub_3_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1_1 = mask_sub_sub_1_1_1 | _mask_sub_acc_T_11; // @[Misc.scala:215:{29,38}] wire mask_sub_4_2_1 = mask_sub_sub_2_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_12 = mask_sub_size_1 & mask_sub_4_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_4_1_1 = mask_sub_sub_2_1_1 | _mask_sub_acc_T_12; // @[Misc.scala:215:{29,38}] wire mask_sub_5_2_1 = mask_sub_sub_2_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_13 = mask_sub_size_1 & mask_sub_5_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_5_1_1 = mask_sub_sub_2_1_1 | _mask_sub_acc_T_13; // @[Misc.scala:215:{29,38}] wire mask_sub_6_2_1 = mask_sub_sub_3_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_14 = mask_sub_size_1 & mask_sub_6_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_6_1_1 = mask_sub_sub_3_1_1 | _mask_sub_acc_T_14; // @[Misc.scala:215:{29,38}] wire mask_sub_7_2_1 = mask_sub_sub_3_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_15 = mask_sub_size_1 & mask_sub_7_2_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_7_1_1 = mask_sub_sub_3_1_1 | _mask_sub_acc_T_15; // @[Misc.scala:215:{29,38}] wire mask_size_1 = mask_sizeOH_1[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit_1 = io_in_b_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit_1 = ~mask_bit_1; // @[Misc.scala:210:26, :211:20] wire mask_eq_16 = mask_sub_0_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_16 = mask_size_1 & mask_eq_16; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_16 = mask_sub_0_1_1 | _mask_acc_T_16; // @[Misc.scala:215:{29,38}] wire mask_eq_17 = mask_sub_0_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_17 = mask_size_1 & mask_eq_17; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_17 = mask_sub_0_1_1 | _mask_acc_T_17; // @[Misc.scala:215:{29,38}] wire mask_eq_18 = mask_sub_1_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_18 = mask_size_1 & mask_eq_18; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_18 = mask_sub_1_1_1 | _mask_acc_T_18; // @[Misc.scala:215:{29,38}] wire mask_eq_19 = mask_sub_1_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_19 = mask_size_1 & mask_eq_19; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_19 = mask_sub_1_1_1 | _mask_acc_T_19; // @[Misc.scala:215:{29,38}] wire mask_eq_20 = mask_sub_2_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_20 = mask_size_1 & mask_eq_20; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_20 = mask_sub_2_1_1 | _mask_acc_T_20; // @[Misc.scala:215:{29,38}] wire mask_eq_21 = mask_sub_2_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_21 = mask_size_1 & mask_eq_21; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_21 = mask_sub_2_1_1 | _mask_acc_T_21; // @[Misc.scala:215:{29,38}] wire mask_eq_22 = mask_sub_3_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_22 = mask_size_1 & mask_eq_22; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_22 = mask_sub_3_1_1 | _mask_acc_T_22; // @[Misc.scala:215:{29,38}] wire mask_eq_23 = mask_sub_3_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_23 = 
mask_size_1 & mask_eq_23; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_23 = mask_sub_3_1_1 | _mask_acc_T_23; // @[Misc.scala:215:{29,38}] wire mask_eq_24 = mask_sub_4_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_24 = mask_size_1 & mask_eq_24; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_24 = mask_sub_4_1_1 | _mask_acc_T_24; // @[Misc.scala:215:{29,38}] wire mask_eq_25 = mask_sub_4_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_25 = mask_size_1 & mask_eq_25; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_25 = mask_sub_4_1_1 | _mask_acc_T_25; // @[Misc.scala:215:{29,38}] wire mask_eq_26 = mask_sub_5_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_26 = mask_size_1 & mask_eq_26; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_26 = mask_sub_5_1_1 | _mask_acc_T_26; // @[Misc.scala:215:{29,38}] wire mask_eq_27 = mask_sub_5_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_27 = mask_size_1 & mask_eq_27; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_27 = mask_sub_5_1_1 | _mask_acc_T_27; // @[Misc.scala:215:{29,38}] wire mask_eq_28 = mask_sub_6_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_28 = mask_size_1 & mask_eq_28; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_28 = mask_sub_6_1_1 | _mask_acc_T_28; // @[Misc.scala:215:{29,38}] wire mask_eq_29 = mask_sub_6_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_29 = mask_size_1 & mask_eq_29; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_29 = mask_sub_6_1_1 | _mask_acc_T_29; // @[Misc.scala:215:{29,38}] wire mask_eq_30 = mask_sub_7_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_30 = mask_size_1 & mask_eq_30; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_30 = mask_sub_7_1_1 | _mask_acc_T_30; // @[Misc.scala:215:{29,38}] wire mask_eq_31 = mask_sub_7_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_31 = mask_size_1 & mask_eq_31; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_31 = mask_sub_7_1_1 | _mask_acc_T_31; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo_lo_1 = {mask_acc_17, mask_acc_16}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_lo_hi_1 = {mask_acc_19, mask_acc_18}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo_lo_1 = {mask_lo_lo_hi_1, mask_lo_lo_lo_1}; // @[Misc.scala:222:10] wire [1:0] mask_lo_hi_lo_1 = {mask_acc_21, mask_acc_20}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi_hi_1 = {mask_acc_23, mask_acc_22}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo_hi_1 = {mask_lo_hi_hi_1, mask_lo_hi_lo_1}; // @[Misc.scala:222:10] wire [7:0] mask_lo_1 = {mask_lo_hi_1, mask_lo_lo_1}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo_lo_1 = {mask_acc_25, mask_acc_24}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_lo_hi_1 = {mask_acc_27, mask_acc_26}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi_lo_1 = {mask_hi_lo_hi_1, mask_hi_lo_lo_1}; // @[Misc.scala:222:10] wire [1:0] mask_hi_hi_lo_1 = {mask_acc_29, mask_acc_28}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi_hi_1 = {mask_acc_31, mask_acc_30}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi_hi_1 = {mask_hi_hi_hi_1, mask_hi_hi_lo_1}; // @[Misc.scala:222:10] wire [7:0] mask_hi_1 = {mask_hi_hi_1, mask_hi_lo_1}; // @[Misc.scala:222:10] wire [15:0] mask_1 = {mask_hi_1, mask_lo_1}; // @[Misc.scala:222:10] wire [2:0] legal_source_uncommonBits = _legal_source_uncommonBits_T[2:0]; // 
@[Parameters.scala:52:{29,56}] wire _legal_source_T_1 = ~_legal_source_T; // @[Parameters.scala:54:{10,32}] wire _legal_source_T_3 = _legal_source_T_1; // @[Parameters.scala:54:{32,67}] wire _legal_source_T_4 = legal_source_uncommonBits < 3'h5; // @[Parameters.scala:52:56, :57:20] wire _legal_source_T_5 = _legal_source_T_3 & _legal_source_T_4; // @[Parameters.scala:54:67, :56:48, :57:20] wire _legal_source_WIRE_0 = _legal_source_T_5; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_1 = _legal_source_T_6; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_2 = _legal_source_T_7; // @[Parameters.scala:1138:31] wire [2:0] _legal_source_T_9 = _legal_source_WIRE_1 ? 3'h5 : 3'h0; // @[Mux.scala:30:73] wire [2:0] _legal_source_T_11 = _legal_source_T_9; // @[Mux.scala:30:73] wire [3:0] _legal_source_T_10 = {_legal_source_WIRE_2, 3'h0}; // @[Mux.scala:30:73] wire [3:0] _legal_source_T_12 = {1'h0, _legal_source_T_11} | _legal_source_T_10; // @[Mux.scala:30:73] wire [3:0] _legal_source_WIRE_1_0 = _legal_source_T_12; // @[Mux.scala:30:73] wire legal_source = _legal_source_WIRE_1_0 == io_in_b_bits_source_0; // @[Mux.scala:30:73] wire [2:0] uncommonBits_12 = _uncommonBits_T_12[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[2:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_18 = io_in_c_bits_source_0[3]; // @[Monitor.scala:36:7] wire _source_ok_T_19 = ~_source_ok_T_18; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_21 = _source_ok_T_19; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_22 = source_ok_uncommonBits_2 < 3'h5; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_23 = _source_ok_T_21 & _source_ok_T_22; // @[Parameters.scala:54:67, :56:48, :57:20] wire _source_ok_WIRE_2_0 = _source_ok_T_23; // @[Parameters.scala:1138:31] wire _source_ok_T_24 = io_in_c_bits_source_0 == 4'h5; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_1 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire _source_ok_T_25 = io_in_c_bits_source_0 == 4'h8; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_2 = _source_ok_T_25; // @[Parameters.scala:1138:31] wire _source_ok_T_26 = _source_ok_WIRE_2_0 | _source_ok_WIRE_2_1; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_2 = _source_ok_T_26 | _source_ok_WIRE_2_2; // @[Parameters.scala:1138:31, :1139:46] wire [26:0] _GEN_9 = 27'hFFF << io_in_c_bits_size_0; // @[package.scala:243:71] wire [26:0] _is_aligned_mask_T_4; // @[package.scala:243:71] assign _is_aligned_mask_T_4 = _GEN_9; // @[package.scala:243:71] wire [26:0] _c_first_beats1_decode_T; // @[package.scala:243:71] assign _c_first_beats1_decode_T = _GEN_9; // @[package.scala:243:71] wire [26:0] _c_first_beats1_decode_T_3; // @[package.scala:243:71] assign _c_first_beats1_decode_T_3 = _GEN_9; // @[package.scala:243:71] wire [11:0] _is_aligned_mask_T_5 = _is_aligned_mask_T_4[11:0]; // @[package.scala:243:{71,76}] wire [11:0] is_aligned_mask_2 = ~_is_aligned_mask_T_5; // @[package.scala:243:{46,76}] wire [31:0] _is_aligned_T_2 = {20'h0, io_in_c_bits_address_0[11:0] & is_aligned_mask_2}; // @[package.scala:243:46] wire is_aligned_2 = _is_aligned_T_2 == 32'h0; // @[Edges.scala:21:{16,24}] wire [32:0] _address_ok_T_71 = {1'h0, _address_ok_T_70}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_72 = _address_ok_T_71 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_73 = _address_ok_T_72; // @[Parameters.scala:137:46] wire _address_ok_T_74 = _address_ok_T_73 == 33'h0; // @[Parameters.scala:137:{46,59}] 
wire _address_ok_WIRE_1_0 = _address_ok_T_74; // @[Parameters.scala:612:40] wire [31:0] _address_ok_T_75 = {io_in_c_bits_address_0[31:13], io_in_c_bits_address_0[12:0] ^ 13'h1000}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_76 = {1'h0, _address_ok_T_75}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_77 = _address_ok_T_76 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_78 = _address_ok_T_77; // @[Parameters.scala:137:46] wire _address_ok_T_79 = _address_ok_T_78 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_1 = _address_ok_T_79; // @[Parameters.scala:612:40] wire [13:0] _GEN_10 = io_in_c_bits_address_0[13:0] ^ 14'h3000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_80 = {io_in_c_bits_address_0[31:14], _GEN_10}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_81 = {1'h0, _address_ok_T_80}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_82 = _address_ok_T_81 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_83 = _address_ok_T_82; // @[Parameters.scala:137:46] wire _address_ok_T_84 = _address_ok_T_83 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_2 = _address_ok_T_84; // @[Parameters.scala:612:40] wire [16:0] _GEN_11 = io_in_c_bits_address_0[16:0] ^ 17'h10000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_85 = {io_in_c_bits_address_0[31:17], _GEN_11}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_86 = {1'h0, _address_ok_T_85}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_87 = _address_ok_T_86 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_88 = _address_ok_T_87; // @[Parameters.scala:137:46] wire _address_ok_T_89 = _address_ok_T_88 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_3 = _address_ok_T_89; // @[Parameters.scala:612:40] wire [20:0] _GEN_12 = io_in_c_bits_address_0[20:0] ^ 21'h100000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_90 = {io_in_c_bits_address_0[31:21], _GEN_12}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_91 = {1'h0, _address_ok_T_90}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_92 = _address_ok_T_91 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_93 = _address_ok_T_92; // @[Parameters.scala:137:46] wire _address_ok_T_94 = _address_ok_T_93 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_4 = _address_ok_T_94; // @[Parameters.scala:612:40] wire [31:0] _address_ok_T_95 = {io_in_c_bits_address_0[31:21], io_in_c_bits_address_0[20:0] ^ 21'h110000}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_96 = {1'h0, _address_ok_T_95}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_97 = _address_ok_T_96 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_98 = _address_ok_T_97; // @[Parameters.scala:137:46] wire _address_ok_T_99 = _address_ok_T_98 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_5 = _address_ok_T_99; // @[Parameters.scala:612:40] wire [25:0] _GEN_13 = io_in_c_bits_address_0[25:0] ^ 26'h2000000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_100 = {io_in_c_bits_address_0[31:26], _GEN_13}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_101 = {1'h0, _address_ok_T_100}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_102 = _address_ok_T_101 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_103 = _address_ok_T_102; // @[Parameters.scala:137:46] wire _address_ok_T_104 
= _address_ok_T_103 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_6 = _address_ok_T_104; // @[Parameters.scala:612:40] wire [25:0] _GEN_14 = io_in_c_bits_address_0[25:0] ^ 26'h2010000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_105 = {io_in_c_bits_address_0[31:26], _GEN_14}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_106 = {1'h0, _address_ok_T_105}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_107 = _address_ok_T_106 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_108 = _address_ok_T_107; // @[Parameters.scala:137:46] wire _address_ok_T_109 = _address_ok_T_108 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_7 = _address_ok_T_109; // @[Parameters.scala:612:40] wire [27:0] _GEN_15 = io_in_c_bits_address_0[27:0] ^ 28'h8000000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_110 = {io_in_c_bits_address_0[31:28], _GEN_15}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_111 = {1'h0, _address_ok_T_110}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_112 = _address_ok_T_111 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_113 = _address_ok_T_112; // @[Parameters.scala:137:46] wire _address_ok_T_114 = _address_ok_T_113 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_8 = _address_ok_T_114; // @[Parameters.scala:612:40] wire [27:0] _GEN_16 = io_in_c_bits_address_0[27:0] ^ 28'hC000000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_115 = {io_in_c_bits_address_0[31:28], _GEN_16}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_116 = {1'h0, _address_ok_T_115}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_117 = _address_ok_T_116 & 33'h1FC000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_118 = _address_ok_T_117; // @[Parameters.scala:137:46] wire _address_ok_T_119 = _address_ok_T_118 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_9 = _address_ok_T_119; // @[Parameters.scala:612:40] wire [28:0] _GEN_17 = io_in_c_bits_address_0[28:0] ^ 29'h10020000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_120 = {io_in_c_bits_address_0[31:29], _GEN_17}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_121 = {1'h0, _address_ok_T_120}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_122 = _address_ok_T_121 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_123 = _address_ok_T_122; // @[Parameters.scala:137:46] wire _address_ok_T_124 = _address_ok_T_123 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_10 = _address_ok_T_124; // @[Parameters.scala:612:40] wire [31:0] _address_ok_T_125 = io_in_c_bits_address_0 ^ 32'h80000000; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_126 = {1'h0, _address_ok_T_125}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_127 = _address_ok_T_126 & 33'h1F0000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_128 = _address_ok_T_127; // @[Parameters.scala:137:46] wire _address_ok_T_129 = _address_ok_T_128 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_11 = _address_ok_T_129; // @[Parameters.scala:612:40] wire _address_ok_T_130 = _address_ok_WIRE_1_0 | _address_ok_WIRE_1_1; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_131 = _address_ok_T_130 | _address_ok_WIRE_1_2; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_132 = _address_ok_T_131 | _address_ok_WIRE_1_3; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_133 
= _address_ok_T_132 | _address_ok_WIRE_1_4; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_134 = _address_ok_T_133 | _address_ok_WIRE_1_5; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_135 = _address_ok_T_134 | _address_ok_WIRE_1_6; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_136 = _address_ok_T_135 | _address_ok_WIRE_1_7; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_137 = _address_ok_T_136 | _address_ok_WIRE_1_8; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_138 = _address_ok_T_137 | _address_ok_WIRE_1_9; // @[Parameters.scala:612:40, :636:64] wire _address_ok_T_139 = _address_ok_T_138 | _address_ok_WIRE_1_10; // @[Parameters.scala:612:40, :636:64] wire address_ok_1 = _address_ok_T_139 | _address_ok_WIRE_1_11; // @[Parameters.scala:612:40, :636:64] wire [2:0] uncommonBits_13 = _uncommonBits_T_13[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_14 = _uncommonBits_T_14[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_15 = _uncommonBits_T_15[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_16 = _uncommonBits_T_16[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_17 = _uncommonBits_T_17[2:0]; // @[Parameters.scala:52:{29,56}] wire _T_2551 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_2551; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_2551; // @[Decoupled.scala:51:35] wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [7:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:4]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [7:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 8'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [7:0] a_first_counter; // @[Edges.scala:229:27] wire [8:0] _a_first_counter1_T = {1'h0, a_first_counter} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] a_first_counter1 = _a_first_counter1_T[7:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 8'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 8'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 8'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [7:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [7:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [7:0] _a_first_counter_T = a_first ? 
a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [3:0] size; // @[Monitor.scala:389:22] reg [3:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] wire _T_2625 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_2625; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_2625; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_2625; // @[Decoupled.scala:51:35] wire _d_first_T_3; // @[Decoupled.scala:51:35] assign _d_first_T_3 = _T_2625; // @[Decoupled.scala:51:35] wire [26:0] _GEN_18 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_18; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_18; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_18; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_9; // @[package.scala:243:71] assign _d_first_beats1_decode_T_9 = _GEN_18; // @[package.scala:243:71] wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [7:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:4]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_3 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [7:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 8'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [7:0] d_first_counter; // @[Edges.scala:229:27] wire [8:0] _d_first_counter1_T = {1'h0, d_first_counter} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] d_first_counter1 = _d_first_counter1_T[7:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 8'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 8'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 8'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [7:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [7:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [7:0] _d_first_counter_T = d_first ? 
d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg [3:0] source_1; // @[Monitor.scala:541:22] reg [3:0] sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] wire _b_first_T = io_in_b_ready_0 & io_in_b_valid_0; // @[Decoupled.scala:51:35] wire b_first_done = _b_first_T; // @[Decoupled.scala:51:35] wire [11:0] _b_first_beats1_decode_T_1 = _b_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _b_first_beats1_decode_T_2 = ~_b_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [7:0] b_first_beats1_decode = _b_first_beats1_decode_T_2[11:4]; // @[package.scala:243:46] wire _b_first_beats1_opdata_T = io_in_b_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire b_first_beats1_opdata = ~_b_first_beats1_opdata_T; // @[Edges.scala:97:{28,37}] reg [7:0] b_first_counter; // @[Edges.scala:229:27] wire [8:0] _b_first_counter1_T = {1'h0, b_first_counter} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] b_first_counter1 = _b_first_counter1_T[7:0]; // @[Edges.scala:230:28] wire b_first = b_first_counter == 8'h0; // @[Edges.scala:229:27, :231:25] wire _b_first_last_T = b_first_counter == 8'h1; // @[Edges.scala:229:27, :232:25] wire [7:0] _b_first_count_T = ~b_first_counter1; // @[Edges.scala:230:28, :234:27] wire [7:0] _b_first_counter_T = b_first ? 8'h0 : b_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [2:0] opcode_2; // @[Monitor.scala:410:22] reg [1:0] param_2; // @[Monitor.scala:411:22] reg [3:0] size_2; // @[Monitor.scala:412:22] reg [3:0] source_2; // @[Monitor.scala:413:22] reg [31:0] address_1; // @[Monitor.scala:414:22] wire _T_2622 = io_in_c_ready_0 & io_in_c_valid_0; // @[Decoupled.scala:51:35] wire _c_first_T; // @[Decoupled.scala:51:35] assign _c_first_T = _T_2622; // @[Decoupled.scala:51:35] wire _c_first_T_1; // @[Decoupled.scala:51:35] assign _c_first_T_1 = _T_2622; // @[Decoupled.scala:51:35] wire [11:0] _c_first_beats1_decode_T_1 = _c_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _c_first_beats1_decode_T_2 = ~_c_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [7:0] c_first_beats1_decode = _c_first_beats1_decode_T_2[11:4]; // @[package.scala:243:46] wire c_first_beats1_opdata = io_in_c_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire c_first_beats1_opdata_1 = io_in_c_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [7:0] c_first_beats1 = c_first_beats1_opdata ? c_first_beats1_decode : 8'h0; // @[Edges.scala:102:36, :220:59, :221:14] reg [7:0] c_first_counter; // @[Edges.scala:229:27] wire [8:0] _c_first_counter1_T = {1'h0, c_first_counter} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] c_first_counter1 = _c_first_counter1_T[7:0]; // @[Edges.scala:230:28] wire c_first = c_first_counter == 8'h0; // @[Edges.scala:229:27, :231:25] wire _c_first_last_T = c_first_counter == 8'h1; // @[Edges.scala:229:27, :232:25] wire _c_first_last_T_1 = c_first_beats1 == 8'h0; // @[Edges.scala:221:14, :232:43] wire c_first_last = _c_first_last_T | _c_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire c_first_done = c_first_last & _c_first_T; // @[Decoupled.scala:51:35] wire [7:0] _c_first_count_T = ~c_first_counter1; // @[Edges.scala:230:28, :234:27] wire [7:0] c_first_count = c_first_beats1 & _c_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [7:0] _c_first_counter_T = c_first ? 
c_first_beats1 : c_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_3; // @[Monitor.scala:515:22] reg [2:0] param_3; // @[Monitor.scala:516:22] reg [3:0] size_3; // @[Monitor.scala:517:22] reg [3:0] source_3; // @[Monitor.scala:518:22] reg [31:0] address_2; // @[Monitor.scala:519:22] reg [8:0] inflight; // @[Monitor.scala:614:27] reg [35:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [71:0] inflight_sizes; // @[Monitor.scala:618:33] wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [7:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:4]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [7:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 8'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [7:0] a_first_counter_1; // @[Edges.scala:229:27] wire [8:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] a_first_counter1_1 = _a_first_counter1_T_1[7:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 8'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 8'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 8'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [7:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [7:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [7:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [7:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:4]; // @[package.scala:243:46] wire [7:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 8'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [7:0] d_first_counter_1; // @[Edges.scala:229:27] wire [8:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] d_first_counter1_1 = _d_first_counter1_T_1[7:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 8'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 8'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 8'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [7:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [7:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [7:0] _d_first_counter_T_1 = d_first_1 ? 
d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [8:0] a_set; // @[Monitor.scala:626:34] wire [8:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [35:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [71:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [6:0] _GEN_19 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [6:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_19; // @[Monitor.scala:637:69] wire [6:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_19; // @[Monitor.scala:637:69, :680:101] wire [6:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_19; // @[Monitor.scala:637:69, :749:69] wire [6:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_19; // @[Monitor.scala:637:69, :790:101] wire [35:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [35:0] _a_opcode_lookup_T_6 = {32'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [35:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[35:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [7:0] a_size_lookup; // @[Monitor.scala:639:33] wire [6:0] _GEN_20 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65] wire [6:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_20; // @[Monitor.scala:641:65] wire [6:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_20; // @[Monitor.scala:641:65, :681:99] wire [6:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_20; // @[Monitor.scala:641:65, :750:67] wire [6:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_20; // @[Monitor.scala:641:65, :791:99] wire [71:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [71:0] _a_size_lookup_T_6 = {64'h0, _a_size_lookup_T_1[7:0]}; // @[Monitor.scala:641:{40,91}] wire [71:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[71:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [15:0] _GEN_21 = 16'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [15:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_21; // @[OneHot.scala:58:35] wire [15:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_21; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[8:0] : 9'h0; // @[OneHot.scala:58:35] wire _T_2477 = _T_2551 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_2477 ? _a_set_T[8:0] : 9'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_2477 ? 
_a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_2477 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [6:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [130:0] _a_opcodes_set_T_1 = {127'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_2477 ? _a_opcodes_set_T_1[35:0] : 36'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [6:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77] wire [131:0] _a_sizes_set_T_1 = {127'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_2477 ? _a_sizes_set_T_1[71:0] : 72'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [8:0] d_clr; // @[Monitor.scala:664:34] wire [8:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [35:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [71:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_22 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_22; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_22; // @[Monitor.scala:673:46, :783:46] wire _T_2523 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [15:0] _GEN_23 = 16'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [15:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_23; // @[OneHot.scala:58:35] wire [15:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_23; // @[OneHot.scala:58:35] wire [15:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_23; // @[OneHot.scala:58:35] wire [15:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_23; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_2523 & ~d_release_ack ? _d_clr_wo_ready_T[8:0] : 9'h0; // @[OneHot.scala:58:35] wire _T_2492 = _T_2625 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_2492 ? _d_clr_T[8:0] : 9'h0; // @[OneHot.scala:58:35] wire [142:0] _d_opcodes_clr_T_5 = 143'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_2492 ? _d_opcodes_clr_T_5[35:0] : 36'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [142:0] _d_sizes_clr_T_5 = 143'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_2492 ? 
_d_sizes_clr_T_5[71:0] : 72'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [8:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [8:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [8:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [35:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [35:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [35:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [71:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [71:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [71:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [8:0] inflight_1; // @[Monitor.scala:726:35] reg [35:0] inflight_opcodes_1; // @[Monitor.scala:727:35] reg [71:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [11:0] _c_first_beats1_decode_T_4 = _c_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _c_first_beats1_decode_T_5 = ~_c_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [7:0] c_first_beats1_decode_1 = _c_first_beats1_decode_T_5[11:4]; // @[package.scala:243:46] wire [7:0] c_first_beats1_1 = c_first_beats1_opdata_1 ? c_first_beats1_decode_1 : 8'h0; // @[Edges.scala:102:36, :220:59, :221:14] reg [7:0] c_first_counter_1; // @[Edges.scala:229:27] wire [8:0] _c_first_counter1_T_1 = {1'h0, c_first_counter_1} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] c_first_counter1_1 = _c_first_counter1_T_1[7:0]; // @[Edges.scala:230:28] wire c_first_1 = c_first_counter_1 == 8'h0; // @[Edges.scala:229:27, :231:25] wire _c_first_last_T_2 = c_first_counter_1 == 8'h1; // @[Edges.scala:229:27, :232:25] wire _c_first_last_T_3 = c_first_beats1_1 == 8'h0; // @[Edges.scala:221:14, :232:43] wire c_first_last_1 = _c_first_last_T_2 | _c_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire c_first_done_1 = c_first_last_1 & _c_first_T_1; // @[Decoupled.scala:51:35] wire [7:0] _c_first_count_T_1 = ~c_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [7:0] c_first_count_1 = c_first_beats1_1 & _c_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [7:0] _c_first_counter_T_1 = c_first_1 ? c_first_beats1_1 : c_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [7:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:4]; // @[package.scala:243:46] wire [7:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? 
d_first_beats1_decode_2 : 8'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [7:0] d_first_counter_2; // @[Edges.scala:229:27] wire [8:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] d_first_counter1_2 = _d_first_counter1_T_2[7:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 8'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 8'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 8'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [7:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [7:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [7:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [8:0] c_set; // @[Monitor.scala:738:34] wire [8:0] c_set_wo_ready; // @[Monitor.scala:739:34] wire [35:0] c_opcodes_set; // @[Monitor.scala:740:34] wire [71:0] c_sizes_set; // @[Monitor.scala:741:34] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [7:0] c_size_lookup; // @[Monitor.scala:748:35] wire [35:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [35:0] _c_opcode_lookup_T_6 = {32'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [35:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[35:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [71:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [71:0] _c_size_lookup_T_6 = {64'h0, _c_size_lookup_T_1[7:0]}; // @[Monitor.scala:750:{42,93}] wire [71:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[71:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [3:0] c_opcodes_set_interm; // @[Monitor.scala:754:40] wire [4:0] c_sizes_set_interm; // @[Monitor.scala:755:40] wire _same_cycle_resp_T_3 = io_in_c_valid_0 & c_first_1; // @[Monitor.scala:36:7, :759:26, :795:44] wire _same_cycle_resp_T_4 = io_in_c_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _same_cycle_resp_T_5 = io_in_c_bits_opcode_0[1]; // @[Monitor.scala:36:7] wire [15:0] _GEN_24 = 16'h1 << io_in_c_bits_source_0; // @[OneHot.scala:58:35] wire [15:0] _c_set_wo_ready_T; // @[OneHot.scala:58:35] assign _c_set_wo_ready_T = _GEN_24; // @[OneHot.scala:58:35] wire [15:0] _c_set_T; // @[OneHot.scala:58:35] assign _c_set_T = _GEN_24; // @[OneHot.scala:58:35] assign c_set_wo_ready = _same_cycle_resp_T_3 & _same_cycle_resp_T_4 & _same_cycle_resp_T_5 ? _c_set_wo_ready_T[8:0] : 9'h0; // @[OneHot.scala:58:35] wire _T_2564 = _T_2622 & c_first_1 & _same_cycle_resp_T_4 & _same_cycle_resp_T_5; // @[Decoupled.scala:51:35] assign c_set = _T_2564 ? _c_set_T[8:0] : 9'h0; // @[OneHot.scala:58:35] wire [3:0] _c_opcodes_set_interm_T = {io_in_c_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :765:53] wire [3:0] _c_opcodes_set_interm_T_1 = {_c_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:765:{53,61}] assign c_opcodes_set_interm = _T_2564 ? 
_c_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:754:40, :763:{25,36,70}, :765:{28,61}] wire [4:0] _c_sizes_set_interm_T = {io_in_c_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :766:51] wire [4:0] _c_sizes_set_interm_T_1 = {_c_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:766:{51,59}] assign c_sizes_set_interm = _T_2564 ? _c_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:755:40, :763:{25,36,70}, :766:{28,59}] wire [6:0] _c_opcodes_set_T = {1'h0, io_in_c_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :767:79] wire [130:0] _c_opcodes_set_T_1 = {127'h0, c_opcodes_set_interm} << _c_opcodes_set_T; // @[Monitor.scala:659:54, :754:40, :767:{54,79}] assign c_opcodes_set = _T_2564 ? _c_opcodes_set_T_1[35:0] : 36'h0; // @[Monitor.scala:740:34, :763:{25,36,70}, :767:{28,54}] wire [6:0] _c_sizes_set_T = {io_in_c_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :768:77] wire [131:0] _c_sizes_set_T_1 = {127'h0, c_sizes_set_interm} << _c_sizes_set_T; // @[Monitor.scala:659:54, :755:40, :768:{52,77}] assign c_sizes_set = _T_2564 ? _c_sizes_set_T_1[71:0] : 72'h0; // @[Monitor.scala:741:34, :763:{25,36,70}, :768:{28,52}] wire _c_probe_ack_T = io_in_c_bits_opcode_0 == 3'h4; // @[Monitor.scala:36:7, :772:47] wire _c_probe_ack_T_1 = io_in_c_bits_opcode_0 == 3'h5; // @[Monitor.scala:36:7, :772:95] wire c_probe_ack = _c_probe_ack_T | _c_probe_ack_T_1; // @[Monitor.scala:772:{47,71,95}] wire [8:0] d_clr_1; // @[Monitor.scala:774:34] wire [8:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [35:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [71:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_2595 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_2595 & d_release_ack_1 ? _d_clr_wo_ready_T_1[8:0] : 9'h0; // @[OneHot.scala:58:35] wire _T_2577 = _T_2625 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_2577 ? _d_clr_T_1[8:0] : 9'h0; // @[OneHot.scala:58:35] wire [142:0] _d_opcodes_clr_T_11 = 143'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_2577 ? _d_opcodes_clr_T_11[35:0] : 36'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [142:0] _d_sizes_clr_T_11 = 143'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_2577 ? 
_d_sizes_clr_T_11[71:0] : 72'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_6 = _same_cycle_resp_T_4 & _same_cycle_resp_T_5; // @[Edges.scala:68:{36,40,51}] wire _same_cycle_resp_T_7 = _same_cycle_resp_T_3 & _same_cycle_resp_T_6; // @[Monitor.scala:795:{44,55}] wire _same_cycle_resp_T_8 = io_in_c_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :795:113] wire same_cycle_resp_1 = _same_cycle_resp_T_7 & _same_cycle_resp_T_8; // @[Monitor.scala:795:{55,88,113}] wire [8:0] _inflight_T_3 = inflight_1 | c_set; // @[Monitor.scala:726:35, :738:34, :814:35] wire [8:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [8:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [35:0] _inflight_opcodes_T_3 = inflight_opcodes_1 | c_opcodes_set; // @[Monitor.scala:727:35, :740:34, :815:43] wire [35:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [35:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [71:0] _inflight_sizes_T_3 = inflight_sizes_1 | c_sizes_set; // @[Monitor.scala:728:35, :741:34, :816:41] wire [71:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [71:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27] wire [32:0] _watchdog_T_2 = {1'h0, watchdog_1} + 33'h1; // @[Monitor.scala:818:27, :823:26] wire [31:0] _watchdog_T_3 = _watchdog_T_2[31:0]; // @[Monitor.scala:823:26] reg [15:0] inflight_2; // @[Monitor.scala:828:27] wire [11:0] _d_first_beats1_decode_T_10 = _d_first_beats1_decode_T_9[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_11 = ~_d_first_beats1_decode_T_10; // @[package.scala:243:{46,76}] wire [7:0] d_first_beats1_decode_3 = _d_first_beats1_decode_T_11[11:4]; // @[package.scala:243:46] wire [7:0] d_first_beats1_3 = d_first_beats1_opdata_3 ? d_first_beats1_decode_3 : 8'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [7:0] d_first_counter_3; // @[Edges.scala:229:27] wire [8:0] _d_first_counter1_T_3 = {1'h0, d_first_counter_3} - 9'h1; // @[Edges.scala:229:27, :230:28] wire [7:0] d_first_counter1_3 = _d_first_counter1_T_3[7:0]; // @[Edges.scala:230:28] wire d_first_3 = d_first_counter_3 == 8'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_6 = d_first_counter_3 == 8'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_7 = d_first_beats1_3 == 8'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_3 = _d_first_last_T_6 | _d_first_last_T_7; // @[Edges.scala:232:{25,33,43}] wire d_first_done_3 = d_first_last_3 & _d_first_T_3; // @[Decoupled.scala:51:35] wire [7:0] _d_first_count_T_3 = ~d_first_counter1_3; // @[Edges.scala:230:28, :234:27] wire [7:0] d_first_count_3 = d_first_beats1_3 & _d_first_count_T_3; // @[Edges.scala:221:14, :234:{25,27}] wire [7:0] _d_first_counter_T_3 = d_first_3 ? d_first_beats1_3 : d_first_counter1_3; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [15:0] d_set; // @[Monitor.scala:833:25] wire _T_2631 = _T_2625 & d_first_3 & io_in_d_bits_opcode_0[2] & ~(io_in_d_bits_opcode_0[1]); // @[Decoupled.scala:51:35] wire [15:0] _GEN_25 = {12'h0, io_in_d_bits_sink_0}; // @[OneHot.scala:58:35] wire [15:0] _d_set_T = 16'h1 << _GEN_25; // @[OneHot.scala:58:35] assign d_set = _T_2631 ? 
_d_set_T : 16'h0; // @[OneHot.scala:58:35] wire [15:0] e_clr; // @[Monitor.scala:839:25] wire _T_2640 = io_in_e_ready_0 & io_in_e_valid_0; // @[Decoupled.scala:51:35] wire [15:0] _GEN_26 = {12'h0, io_in_e_bits_sink_0}; // @[OneHot.scala:58:35] wire [15:0] _e_clr_T = 16'h1 << _GEN_26; // @[OneHot.scala:58:35] assign e_clr = _T_2640 ? _e_clr_T : 16'h0; // @[OneHot.scala:58:35]
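The tail of the generated monitor above (the a_set/d_clr, c_set, d_set, and e_clr assignments together with the inflight registers) is the Verilog that Monitor.scala's transaction bookkeeping elaborates to: a one-hot bit indexed by source ID (for A/C requests matched against D responses) or by sink ID (for D-channel Grants matched against E-channel GrantAcks) is set when the request side is accepted and cleared when the matching acknowledgement is accepted. The following is a minimal, standalone Chisel sketch of that set/clear pattern; it is not the rocket-chip code itself, and the module name, port names, and ID width are illustrative only.

import chisel3._
import chisel3.util._

// Sketch of one-hot inflight tracking: set a bit when an ID is issued,
// clear it when the matching completion arrives, and register the set of
// currently outstanding IDs. numIds is assumed to be >= 2.
class InflightTracker(numIds: Int) extends Module {
  val io = IO(new Bundle {
    val set      = Flipped(Valid(UInt(log2Ceil(numIds).W))) // e.g. an accepted request beat
    val clear    = Flipped(Valid(UInt(log2Ceil(numIds).W))) // e.g. the matching response beat
    val inflight = Output(UInt(numIds.W))
  })

  val inflight = RegInit(0.U(numIds.W))
  val setOH = Mux(io.set.valid,   UIntToOH(io.set.bits,   numIds), 0.U(numIds.W))
  val clrOH = Mux(io.clear.valid, UIntToOH(io.clear.bits, numIds), 0.U(numIds.W))

  inflight := (inflight | setOH) & ~clrOH
  io.inflight := inflight
}

In the monitor, assertions then consult this register, for example to flag a 'D' response whose source ID has nothing inflight.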
Generate the Verilog code corresponding to the following Chisel files. File FrontendTLB.scala: package gemmini import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.rocket._ import freechips.rocketchip.tile.{CoreBundle, CoreModule} import freechips.rocketchip.tilelink.TLEdgeOut import Util._ import midas.targetutils.PerfCounter class DecoupledTLBReq(val lgMaxSize: Int)(implicit p: Parameters) extends CoreBundle { val tlb_req = new TLBReq(lgMaxSize) val status = new MStatus } class TLBExceptionIO extends Bundle { val interrupt = Output(Bool()) val flush_retry = Input(Bool()) val flush_skip = Input(Bool()) def flush(dummy: Int = 0): Bool = flush_retry || flush_skip } // TODO can we make TLB hits only take one cycle? class DecoupledTLB(entries: Int, maxSize: Int, use_firesim_simulation_counters: Boolean)(implicit edge: TLEdgeOut, p: Parameters) extends CoreModule { val lgMaxSize = log2Ceil(maxSize) val io = IO(new Bundle { val req = Flipped(Valid(new DecoupledTLBReq(lgMaxSize))) val resp = new TLBResp val ptw = new TLBPTWIO val exp = new TLBExceptionIO val counter = new CounterEventIO() }) val interrupt = RegInit(false.B) io.exp.interrupt := interrupt val tlb = Module(new TLB(false, lgMaxSize, TLBConfig(nSets=1, nWays=entries))) tlb.io.req.valid := io.req.valid tlb.io.req.bits := io.req.bits.tlb_req io.resp := tlb.io.resp tlb.io.kill := false.B tlb.io.sfence.valid := io.exp.flush() tlb.io.sfence.bits.rs1 := false.B tlb.io.sfence.bits.rs2 := false.B tlb.io.sfence.bits.addr := DontCare tlb.io.sfence.bits.asid := DontCare tlb.io.sfence.bits.hv := false.B tlb.io.sfence.bits.hg := false.B io.ptw <> tlb.io.ptw tlb.io.ptw.status := io.req.bits.status val exception = io.req.valid && Mux(io.req.bits.tlb_req.cmd === M_XRD, tlb.io.resp.pf.ld || tlb.io.resp.ae.ld, tlb.io.resp.pf.st || tlb.io.resp.ae.st) when (exception) { interrupt := true.B } when (interrupt && tlb.io.sfence.fire) { interrupt := false.B } assert(!io.exp.flush_retry || !io.exp.flush_skip, "TLB: flushing with both retry and skip at same time") CounterEventIO.init(io.counter) io.counter.connectEventSignal(CounterEvent.DMA_TLB_HIT_REQ, io.req.fire && !tlb.io.resp.miss) io.counter.connectEventSignal(CounterEvent.DMA_TLB_TOTAL_REQ, io.req.fire) io.counter.connectEventSignal(CounterEvent.DMA_TLB_MISS_CYCLE, tlb.io.resp.miss) if (use_firesim_simulation_counters) { PerfCounter(io.req.fire && !tlb.io.resp.miss, "tlb_hits", "total number of tlb hits") PerfCounter(io.req.fire, "tlb_reqs", "total number of tlb reqs") PerfCounter(tlb.io.resp.miss, "tlb_miss_cycles", "total number of cycles where the tlb is resolving a miss") } } class FrontendTLBIO(implicit p: Parameters) extends CoreBundle { val lgMaxSize = log2Ceil(coreDataBytes) // val req = Decoupled(new TLBReq(lgMaxSize)) val req = Valid(new DecoupledTLBReq(lgMaxSize)) val resp = Flipped(new TLBResp) } class FrontendTLB(nClients: Int, entries: Int, maxSize: Int, use_tlb_register_filter: Boolean, use_firesim_simulation_counters: Boolean, use_shared_tlb: Boolean) (implicit edge: TLEdgeOut, p: Parameters) extends CoreModule { val num_tlbs = if (use_shared_tlb) 1 else nClients val lgMaxSize = log2Ceil(coreDataBytes) val io = IO(new Bundle { val clients = Flipped(Vec(nClients, new FrontendTLBIO)) val ptw = Vec(num_tlbs, new TLBPTWIO) val exp = Vec(num_tlbs, new TLBExceptionIO) val counter = new CounterEventIO() }) val tlbs = Seq.fill(num_tlbs)(Module(new DecoupledTLB(entries, maxSize, use_firesim_simulation_counters))) io.ptw <> 
VecInit(tlbs.map(_.io.ptw)) io.exp <> VecInit(tlbs.map(_.io.exp)) val tlbArbOpt = if (use_shared_tlb) Some(Module(new RRArbiter(new DecoupledTLBReq(lgMaxSize), nClients))) else None if (use_shared_tlb) { val tlbArb = tlbArbOpt.get val tlb = tlbs.head tlb.io.req.valid := tlbArb.io.out.valid tlb.io.req.bits := tlbArb.io.out.bits tlbArb.io.out.ready := true.B } io.clients.zipWithIndex.foreach { case (client, i) => val last_translated_valid = RegInit(false.B) val last_translated_vpn = RegInit(0.U(vaddrBits.W)) val last_translated_ppn = RegInit(0.U(paddrBits.W)) val l0_tlb_hit = last_translated_valid && ((client.req.bits.tlb_req.vaddr >> pgIdxBits).asUInt === (last_translated_vpn >> pgIdxBits).asUInt) val l0_tlb_paddr = Cat(last_translated_ppn >> pgIdxBits, client.req.bits.tlb_req.vaddr(pgIdxBits-1,0)) val tlb = if (use_shared_tlb) tlbs.head else tlbs(i) val tlbReq = if (use_shared_tlb) tlbArbOpt.get.io.in(i).bits else tlb.io.req.bits val tlbReqValid = if (use_shared_tlb) tlbArbOpt.get.io.in(i).valid else tlb.io.req.valid val tlbReqFire = if (use_shared_tlb) tlbArbOpt.get.io.in(i).fire else tlb.io.req.fire tlbReqValid := RegNext(client.req.valid && !l0_tlb_hit) tlbReq := RegNext(client.req.bits) when (tlbReqFire && !tlb.io.resp.miss) { last_translated_valid := true.B last_translated_vpn := tlbReq.tlb_req.vaddr last_translated_ppn := tlb.io.resp.paddr } when (tlb.io.exp.flush()) { last_translated_valid := false.B } when (tlbReqFire) { client.resp := tlb.io.resp }.otherwise { client.resp := DontCare client.resp.paddr := RegNext(l0_tlb_paddr) client.resp.miss := !RegNext(l0_tlb_hit) } // If we're not using the TLB filter register, then we set this value to always be false if (!use_tlb_register_filter) { last_translated_valid := false.B } } // TODO Return the sum of the TLB counters, rather than just the counters of the first TLB. This only matters if we're // not using the shared TLB io.counter := DontCare tlbs.foreach(_.io.counter.external_reset := false.B) io.counter.collect(tlbs.head.io.counter) }
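Each client in FrontendTLB keeps a one-entry filter in front of the shared TLB: the last successful translation is held in last_translated_vpn / last_translated_ppn, a request to the same virtual page bypasses the TLB entirely, and any sfence invalidates the cached entry. A minimal standalone sketch of that single-entry filter (widths and port names are assumptions chosen for illustration, not taken from the source): import chisel3._
import chisel3.util._

class L0TlbFilter(vaddrBits: Int = 39, paddrBits: Int = 32, pgIdxBits: Int = 12) extends Module {
  val io = IO(new Bundle {
    val vaddr      = Input(UInt(vaddrBits.W))
    val update     = Input(Bool())              // the backing TLB answered without a miss
    val update_ppn = Input(UInt(paddrBits.W))   // physical address returned by the TLB
    val flush      = Input(Bool())              // sfence: drop the cached translation
    val hit        = Output(Bool())
    val paddr      = Output(UInt(paddrBits.W))
  })

  val valid = RegInit(false.B)
  val vpn   = Reg(UInt(vaddrBits.W))            // last translated virtual address
  val ppn   = Reg(UInt(paddrBits.W))            // its physical translation

  io.hit   := valid && ((io.vaddr >> pgIdxBits) === (vpn >> pgIdxBits))
  io.paddr := Cat(ppn >> pgIdxBits, io.vaddr(pgIdxBits - 1, 0))

  when (io.update) { valid := true.B; vpn := io.vaddr; ppn := io.update_ppn }
  when (io.flush)  { valid := false.B }
}

In the real design this filter can also be disabled (use_tlb_register_filter = false), which simply forces last_translated_valid to false so every request goes to the shared TLB.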
module DecoupledTLB( // @[FrontendTLB.scala:29:7] input clock, // @[FrontendTLB.scala:29:7] input reset, // @[FrontendTLB.scala:29:7] input io_req_valid, // @[FrontendTLB.scala:33:14] input [39:0] io_req_bits_tlb_req_vaddr, // @[FrontendTLB.scala:33:14] input [4:0] io_req_bits_tlb_req_cmd, // @[FrontendTLB.scala:33:14] input io_req_bits_status_debug, // @[FrontendTLB.scala:33:14] input io_req_bits_status_cease, // @[FrontendTLB.scala:33:14] input io_req_bits_status_wfi, // @[FrontendTLB.scala:33:14] input [31:0] io_req_bits_status_isa, // @[FrontendTLB.scala:33:14] input [1:0] io_req_bits_status_dprv, // @[FrontendTLB.scala:33:14] input io_req_bits_status_dv, // @[FrontendTLB.scala:33:14] input [1:0] io_req_bits_status_prv, // @[FrontendTLB.scala:33:14] input io_req_bits_status_v, // @[FrontendTLB.scala:33:14] input io_req_bits_status_sd, // @[FrontendTLB.scala:33:14] input [22:0] io_req_bits_status_zero2, // @[FrontendTLB.scala:33:14] input io_req_bits_status_mpv, // @[FrontendTLB.scala:33:14] input io_req_bits_status_gva, // @[FrontendTLB.scala:33:14] input io_req_bits_status_mbe, // @[FrontendTLB.scala:33:14] input io_req_bits_status_sbe, // @[FrontendTLB.scala:33:14] input [1:0] io_req_bits_status_sxl, // @[FrontendTLB.scala:33:14] input [1:0] io_req_bits_status_uxl, // @[FrontendTLB.scala:33:14] input io_req_bits_status_sd_rv32, // @[FrontendTLB.scala:33:14] input [7:0] io_req_bits_status_zero1, // @[FrontendTLB.scala:33:14] input io_req_bits_status_tsr, // @[FrontendTLB.scala:33:14] input io_req_bits_status_tw, // @[FrontendTLB.scala:33:14] input io_req_bits_status_tvm, // @[FrontendTLB.scala:33:14] input io_req_bits_status_mxr, // @[FrontendTLB.scala:33:14] input io_req_bits_status_sum, // @[FrontendTLB.scala:33:14] input io_req_bits_status_mprv, // @[FrontendTLB.scala:33:14] input [1:0] io_req_bits_status_xs, // @[FrontendTLB.scala:33:14] input [1:0] io_req_bits_status_fs, // @[FrontendTLB.scala:33:14] input [1:0] io_req_bits_status_mpp, // @[FrontendTLB.scala:33:14] input [1:0] io_req_bits_status_vs, // @[FrontendTLB.scala:33:14] input io_req_bits_status_spp, // @[FrontendTLB.scala:33:14] input io_req_bits_status_mpie, // @[FrontendTLB.scala:33:14] input io_req_bits_status_ube, // @[FrontendTLB.scala:33:14] input io_req_bits_status_spie, // @[FrontendTLB.scala:33:14] input io_req_bits_status_upie, // @[FrontendTLB.scala:33:14] input io_req_bits_status_mie, // @[FrontendTLB.scala:33:14] input io_req_bits_status_hie, // @[FrontendTLB.scala:33:14] input io_req_bits_status_sie, // @[FrontendTLB.scala:33:14] input io_req_bits_status_uie, // @[FrontendTLB.scala:33:14] output io_resp_miss, // @[FrontendTLB.scala:33:14] output [31:0] io_resp_paddr, // @[FrontendTLB.scala:33:14] output [39:0] io_resp_gpa, // @[FrontendTLB.scala:33:14] output io_resp_pf_ld, // @[FrontendTLB.scala:33:14] output io_resp_pf_st, // @[FrontendTLB.scala:33:14] output io_resp_pf_inst, // @[FrontendTLB.scala:33:14] output io_resp_ae_ld, // @[FrontendTLB.scala:33:14] output io_resp_ae_st, // @[FrontendTLB.scala:33:14] output io_resp_ae_inst, // @[FrontendTLB.scala:33:14] output io_resp_cacheable, // @[FrontendTLB.scala:33:14] output io_resp_must_alloc, // @[FrontendTLB.scala:33:14] output io_resp_prefetchable, // @[FrontendTLB.scala:33:14] output [4:0] io_resp_cmd, // @[FrontendTLB.scala:33:14] input io_ptw_req_ready, // @[FrontendTLB.scala:33:14] output io_ptw_req_valid, // @[FrontendTLB.scala:33:14] output [26:0] io_ptw_req_bits_bits_addr, // @[FrontendTLB.scala:33:14] output io_ptw_req_bits_bits_need_gpa, // 
@[FrontendTLB.scala:33:14] input io_ptw_resp_valid, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_ae_ptw, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_ae_final, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_pf, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_gf, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_hr, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_hw, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_hx, // @[FrontendTLB.scala:33:14] input [9:0] io_ptw_resp_bits_pte_reserved_for_future, // @[FrontendTLB.scala:33:14] input [43:0] io_ptw_resp_bits_pte_ppn, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_resp_bits_pte_reserved_for_software, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_pte_d, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_pte_a, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_pte_g, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_pte_u, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_pte_x, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_pte_w, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_pte_r, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_pte_v, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_resp_bits_level, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_homogeneous, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_gpa_valid, // @[FrontendTLB.scala:33:14] input [38:0] io_ptw_resp_bits_gpa_bits, // @[FrontendTLB.scala:33:14] input io_ptw_resp_bits_gpa_is_pte, // @[FrontendTLB.scala:33:14] input [3:0] io_ptw_ptbr_mode, // @[FrontendTLB.scala:33:14] input [43:0] io_ptw_ptbr_ppn, // @[FrontendTLB.scala:33:14] input io_ptw_status_debug, // @[FrontendTLB.scala:33:14] input io_ptw_status_cease, // @[FrontendTLB.scala:33:14] input io_ptw_status_wfi, // @[FrontendTLB.scala:33:14] input [31:0] io_ptw_status_isa, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_status_dprv, // @[FrontendTLB.scala:33:14] input io_ptw_status_dv, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_status_prv, // @[FrontendTLB.scala:33:14] input io_ptw_status_v, // @[FrontendTLB.scala:33:14] input io_ptw_status_mpv, // @[FrontendTLB.scala:33:14] input io_ptw_status_gva, // @[FrontendTLB.scala:33:14] input io_ptw_status_tsr, // @[FrontendTLB.scala:33:14] input io_ptw_status_tw, // @[FrontendTLB.scala:33:14] input io_ptw_status_tvm, // @[FrontendTLB.scala:33:14] input io_ptw_status_mxr, // @[FrontendTLB.scala:33:14] input io_ptw_status_sum, // @[FrontendTLB.scala:33:14] input io_ptw_status_mprv, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_status_fs, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_status_mpp, // @[FrontendTLB.scala:33:14] input io_ptw_status_spp, // @[FrontendTLB.scala:33:14] input io_ptw_status_mpie, // @[FrontendTLB.scala:33:14] input io_ptw_status_spie, // @[FrontendTLB.scala:33:14] input io_ptw_status_mie, // @[FrontendTLB.scala:33:14] input io_ptw_status_sie, // @[FrontendTLB.scala:33:14] input io_ptw_hstatus_spvp, // @[FrontendTLB.scala:33:14] input io_ptw_hstatus_spv, // @[FrontendTLB.scala:33:14] input io_ptw_hstatus_gva, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_debug, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_cease, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_wfi, // @[FrontendTLB.scala:33:14] input [31:0] io_ptw_gstatus_isa, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_gstatus_dprv, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_dv, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_gstatus_prv, // @[FrontendTLB.scala:33:14] input 
io_ptw_gstatus_v, // @[FrontendTLB.scala:33:14] input [22:0] io_ptw_gstatus_zero2, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_mpv, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_gva, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_mbe, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_sbe, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_gstatus_sxl, // @[FrontendTLB.scala:33:14] input [7:0] io_ptw_gstatus_zero1, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_tsr, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_tw, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_tvm, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_mxr, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_sum, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_mprv, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_gstatus_fs, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_gstatus_mpp, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_gstatus_vs, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_spp, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_mpie, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_ube, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_spie, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_upie, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_mie, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_hie, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_sie, // @[FrontendTLB.scala:33:14] input io_ptw_gstatus_uie, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_0_cfg_l, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_pmp_0_cfg_a, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_0_cfg_x, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_0_cfg_w, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_0_cfg_r, // @[FrontendTLB.scala:33:14] input [29:0] io_ptw_pmp_0_addr, // @[FrontendTLB.scala:33:14] input [31:0] io_ptw_pmp_0_mask, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_1_cfg_l, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_pmp_1_cfg_a, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_1_cfg_x, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_1_cfg_w, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_1_cfg_r, // @[FrontendTLB.scala:33:14] input [29:0] io_ptw_pmp_1_addr, // @[FrontendTLB.scala:33:14] input [31:0] io_ptw_pmp_1_mask, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_2_cfg_l, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_pmp_2_cfg_a, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_2_cfg_x, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_2_cfg_w, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_2_cfg_r, // @[FrontendTLB.scala:33:14] input [29:0] io_ptw_pmp_2_addr, // @[FrontendTLB.scala:33:14] input [31:0] io_ptw_pmp_2_mask, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_3_cfg_l, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_pmp_3_cfg_a, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_3_cfg_x, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_3_cfg_w, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_3_cfg_r, // @[FrontendTLB.scala:33:14] input [29:0] io_ptw_pmp_3_addr, // @[FrontendTLB.scala:33:14] input [31:0] io_ptw_pmp_3_mask, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_4_cfg_l, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_pmp_4_cfg_a, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_4_cfg_x, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_4_cfg_w, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_4_cfg_r, // @[FrontendTLB.scala:33:14] input [29:0] io_ptw_pmp_4_addr, // @[FrontendTLB.scala:33:14] input [31:0] io_ptw_pmp_4_mask, // @[FrontendTLB.scala:33:14] input 
io_ptw_pmp_5_cfg_l, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_pmp_5_cfg_a, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_5_cfg_x, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_5_cfg_w, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_5_cfg_r, // @[FrontendTLB.scala:33:14] input [29:0] io_ptw_pmp_5_addr, // @[FrontendTLB.scala:33:14] input [31:0] io_ptw_pmp_5_mask, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_6_cfg_l, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_pmp_6_cfg_a, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_6_cfg_x, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_6_cfg_w, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_6_cfg_r, // @[FrontendTLB.scala:33:14] input [29:0] io_ptw_pmp_6_addr, // @[FrontendTLB.scala:33:14] input [31:0] io_ptw_pmp_6_mask, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_7_cfg_l, // @[FrontendTLB.scala:33:14] input [1:0] io_ptw_pmp_7_cfg_a, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_7_cfg_x, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_7_cfg_w, // @[FrontendTLB.scala:33:14] input io_ptw_pmp_7_cfg_r, // @[FrontendTLB.scala:33:14] input [29:0] io_ptw_pmp_7_addr, // @[FrontendTLB.scala:33:14] input [31:0] io_ptw_pmp_7_mask, // @[FrontendTLB.scala:33:14] input io_ptw_customCSRs_csrs_0_ren, // @[FrontendTLB.scala:33:14] input io_ptw_customCSRs_csrs_0_wen, // @[FrontendTLB.scala:33:14] input [63:0] io_ptw_customCSRs_csrs_0_wdata, // @[FrontendTLB.scala:33:14] input [63:0] io_ptw_customCSRs_csrs_0_value, // @[FrontendTLB.scala:33:14] input io_ptw_customCSRs_csrs_1_ren, // @[FrontendTLB.scala:33:14] input io_ptw_customCSRs_csrs_1_wen, // @[FrontendTLB.scala:33:14] input [63:0] io_ptw_customCSRs_csrs_1_wdata, // @[FrontendTLB.scala:33:14] input [63:0] io_ptw_customCSRs_csrs_1_value, // @[FrontendTLB.scala:33:14] input io_ptw_customCSRs_csrs_2_ren, // @[FrontendTLB.scala:33:14] input io_ptw_customCSRs_csrs_2_wen, // @[FrontendTLB.scala:33:14] input [63:0] io_ptw_customCSRs_csrs_2_wdata, // @[FrontendTLB.scala:33:14] input [63:0] io_ptw_customCSRs_csrs_2_value, // @[FrontendTLB.scala:33:14] input io_ptw_customCSRs_csrs_3_ren, // @[FrontendTLB.scala:33:14] input io_ptw_customCSRs_csrs_3_wen, // @[FrontendTLB.scala:33:14] input [63:0] io_ptw_customCSRs_csrs_3_wdata, // @[FrontendTLB.scala:33:14] input [63:0] io_ptw_customCSRs_csrs_3_value, // @[FrontendTLB.scala:33:14] output io_exp_interrupt, // @[FrontendTLB.scala:33:14] input io_exp_flush_retry, // @[FrontendTLB.scala:33:14] input io_exp_flush_skip, // @[FrontendTLB.scala:33:14] output io_counter_event_signal_15, // @[FrontendTLB.scala:33:14] output io_counter_event_signal_16, // @[FrontendTLB.scala:33:14] output io_counter_event_signal_17, // @[FrontendTLB.scala:33:14] input io_counter_external_reset // @[FrontendTLB.scala:33:14] ); wire _tlb_io_resp_miss; // @[FrontendTLB.scala:46:19] wire _tlb_io_resp_pf_ld; // @[FrontendTLB.scala:46:19] wire _tlb_io_resp_pf_st; // @[FrontendTLB.scala:46:19] wire _tlb_io_resp_ae_ld; // @[FrontendTLB.scala:46:19] wire _tlb_io_resp_ae_st; // @[FrontendTLB.scala:46:19] wire io_req_valid_0 = io_req_valid; // @[FrontendTLB.scala:29:7] wire [39:0] io_req_bits_tlb_req_vaddr_0 = io_req_bits_tlb_req_vaddr; // @[FrontendTLB.scala:29:7] wire [4:0] io_req_bits_tlb_req_cmd_0 = io_req_bits_tlb_req_cmd; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_debug_0 = io_req_bits_status_debug; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_cease_0 = io_req_bits_status_cease; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_wfi_0 = io_req_bits_status_wfi; // 
@[FrontendTLB.scala:29:7] wire [31:0] io_req_bits_status_isa_0 = io_req_bits_status_isa; // @[FrontendTLB.scala:29:7] wire [1:0] io_req_bits_status_dprv_0 = io_req_bits_status_dprv; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_dv_0 = io_req_bits_status_dv; // @[FrontendTLB.scala:29:7] wire [1:0] io_req_bits_status_prv_0 = io_req_bits_status_prv; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_v_0 = io_req_bits_status_v; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_sd_0 = io_req_bits_status_sd; // @[FrontendTLB.scala:29:7] wire [22:0] io_req_bits_status_zero2_0 = io_req_bits_status_zero2; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_mpv_0 = io_req_bits_status_mpv; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_gva_0 = io_req_bits_status_gva; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_mbe_0 = io_req_bits_status_mbe; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_sbe_0 = io_req_bits_status_sbe; // @[FrontendTLB.scala:29:7] wire [1:0] io_req_bits_status_sxl_0 = io_req_bits_status_sxl; // @[FrontendTLB.scala:29:7] wire [1:0] io_req_bits_status_uxl_0 = io_req_bits_status_uxl; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_sd_rv32_0 = io_req_bits_status_sd_rv32; // @[FrontendTLB.scala:29:7] wire [7:0] io_req_bits_status_zero1_0 = io_req_bits_status_zero1; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_tsr_0 = io_req_bits_status_tsr; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_tw_0 = io_req_bits_status_tw; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_tvm_0 = io_req_bits_status_tvm; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_mxr_0 = io_req_bits_status_mxr; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_sum_0 = io_req_bits_status_sum; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_mprv_0 = io_req_bits_status_mprv; // @[FrontendTLB.scala:29:7] wire [1:0] io_req_bits_status_xs_0 = io_req_bits_status_xs; // @[FrontendTLB.scala:29:7] wire [1:0] io_req_bits_status_fs_0 = io_req_bits_status_fs; // @[FrontendTLB.scala:29:7] wire [1:0] io_req_bits_status_mpp_0 = io_req_bits_status_mpp; // @[FrontendTLB.scala:29:7] wire [1:0] io_req_bits_status_vs_0 = io_req_bits_status_vs; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_spp_0 = io_req_bits_status_spp; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_mpie_0 = io_req_bits_status_mpie; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_ube_0 = io_req_bits_status_ube; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_spie_0 = io_req_bits_status_spie; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_upie_0 = io_req_bits_status_upie; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_mie_0 = io_req_bits_status_mie; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_hie_0 = io_req_bits_status_hie; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_sie_0 = io_req_bits_status_sie; // @[FrontendTLB.scala:29:7] wire io_req_bits_status_uie_0 = io_req_bits_status_uie; // @[FrontendTLB.scala:29:7] wire io_ptw_req_ready_0 = io_ptw_req_ready; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_valid_0 = io_ptw_resp_valid; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_ae_ptw_0 = io_ptw_resp_bits_ae_ptw; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_ae_final_0 = io_ptw_resp_bits_ae_final; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_pf_0 = io_ptw_resp_bits_pf; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_gf_0 = io_ptw_resp_bits_gf; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_hr_0 = io_ptw_resp_bits_hr; // 
@[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_hw_0 = io_ptw_resp_bits_hw; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_hx_0 = io_ptw_resp_bits_hx; // @[FrontendTLB.scala:29:7] wire [9:0] io_ptw_resp_bits_pte_reserved_for_future_0 = io_ptw_resp_bits_pte_reserved_for_future; // @[FrontendTLB.scala:29:7] wire [43:0] io_ptw_resp_bits_pte_ppn_0 = io_ptw_resp_bits_pte_ppn; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_resp_bits_pte_reserved_for_software_0 = io_ptw_resp_bits_pte_reserved_for_software; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_pte_d_0 = io_ptw_resp_bits_pte_d; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_pte_a_0 = io_ptw_resp_bits_pte_a; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_pte_g_0 = io_ptw_resp_bits_pte_g; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_pte_u_0 = io_ptw_resp_bits_pte_u; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_pte_x_0 = io_ptw_resp_bits_pte_x; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_pte_w_0 = io_ptw_resp_bits_pte_w; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_pte_r_0 = io_ptw_resp_bits_pte_r; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_pte_v_0 = io_ptw_resp_bits_pte_v; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_resp_bits_level_0 = io_ptw_resp_bits_level; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_homogeneous_0 = io_ptw_resp_bits_homogeneous; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_gpa_valid_0 = io_ptw_resp_bits_gpa_valid; // @[FrontendTLB.scala:29:7] wire [38:0] io_ptw_resp_bits_gpa_bits_0 = io_ptw_resp_bits_gpa_bits; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_gpa_is_pte_0 = io_ptw_resp_bits_gpa_is_pte; // @[FrontendTLB.scala:29:7] wire [3:0] io_ptw_ptbr_mode_0 = io_ptw_ptbr_mode; // @[FrontendTLB.scala:29:7] wire [43:0] io_ptw_ptbr_ppn_0 = io_ptw_ptbr_ppn; // @[FrontendTLB.scala:29:7] wire io_ptw_status_debug_0 = io_ptw_status_debug; // @[FrontendTLB.scala:29:7] wire io_ptw_status_cease_0 = io_ptw_status_cease; // @[FrontendTLB.scala:29:7] wire io_ptw_status_wfi_0 = io_ptw_status_wfi; // @[FrontendTLB.scala:29:7] wire [31:0] io_ptw_status_isa_0 = io_ptw_status_isa; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_status_dprv_0 = io_ptw_status_dprv; // @[FrontendTLB.scala:29:7] wire io_ptw_status_dv_0 = io_ptw_status_dv; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_status_prv_0 = io_ptw_status_prv; // @[FrontendTLB.scala:29:7] wire io_ptw_status_v_0 = io_ptw_status_v; // @[FrontendTLB.scala:29:7] wire io_ptw_status_mpv_0 = io_ptw_status_mpv; // @[FrontendTLB.scala:29:7] wire io_ptw_status_gva_0 = io_ptw_status_gva; // @[FrontendTLB.scala:29:7] wire io_ptw_status_tsr_0 = io_ptw_status_tsr; // @[FrontendTLB.scala:29:7] wire io_ptw_status_tw_0 = io_ptw_status_tw; // @[FrontendTLB.scala:29:7] wire io_ptw_status_tvm_0 = io_ptw_status_tvm; // @[FrontendTLB.scala:29:7] wire io_ptw_status_mxr_0 = io_ptw_status_mxr; // @[FrontendTLB.scala:29:7] wire io_ptw_status_sum_0 = io_ptw_status_sum; // @[FrontendTLB.scala:29:7] wire io_ptw_status_mprv_0 = io_ptw_status_mprv; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_status_fs_0 = io_ptw_status_fs; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_status_mpp_0 = io_ptw_status_mpp; // @[FrontendTLB.scala:29:7] wire io_ptw_status_spp_0 = io_ptw_status_spp; // @[FrontendTLB.scala:29:7] wire io_ptw_status_mpie_0 = io_ptw_status_mpie; // @[FrontendTLB.scala:29:7] wire io_ptw_status_spie_0 = io_ptw_status_spie; // @[FrontendTLB.scala:29:7] wire io_ptw_status_mie_0 = io_ptw_status_mie; // @[FrontendTLB.scala:29:7] wire 
io_ptw_status_sie_0 = io_ptw_status_sie; // @[FrontendTLB.scala:29:7] wire io_ptw_hstatus_spvp_0 = io_ptw_hstatus_spvp; // @[FrontendTLB.scala:29:7] wire io_ptw_hstatus_spv_0 = io_ptw_hstatus_spv; // @[FrontendTLB.scala:29:7] wire io_ptw_hstatus_gva_0 = io_ptw_hstatus_gva; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_debug_0 = io_ptw_gstatus_debug; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_cease_0 = io_ptw_gstatus_cease; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_wfi_0 = io_ptw_gstatus_wfi; // @[FrontendTLB.scala:29:7] wire [31:0] io_ptw_gstatus_isa_0 = io_ptw_gstatus_isa; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_gstatus_dprv_0 = io_ptw_gstatus_dprv; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_dv_0 = io_ptw_gstatus_dv; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_gstatus_prv_0 = io_ptw_gstatus_prv; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_v_0 = io_ptw_gstatus_v; // @[FrontendTLB.scala:29:7] wire [22:0] io_ptw_gstatus_zero2_0 = io_ptw_gstatus_zero2; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_mpv_0 = io_ptw_gstatus_mpv; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_gva_0 = io_ptw_gstatus_gva; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_mbe_0 = io_ptw_gstatus_mbe; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_sbe_0 = io_ptw_gstatus_sbe; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_gstatus_sxl_0 = io_ptw_gstatus_sxl; // @[FrontendTLB.scala:29:7] wire [7:0] io_ptw_gstatus_zero1_0 = io_ptw_gstatus_zero1; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_tsr_0 = io_ptw_gstatus_tsr; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_tw_0 = io_ptw_gstatus_tw; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_tvm_0 = io_ptw_gstatus_tvm; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_mxr_0 = io_ptw_gstatus_mxr; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_sum_0 = io_ptw_gstatus_sum; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_mprv_0 = io_ptw_gstatus_mprv; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_gstatus_fs_0 = io_ptw_gstatus_fs; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_gstatus_mpp_0 = io_ptw_gstatus_mpp; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_gstatus_vs_0 = io_ptw_gstatus_vs; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_spp_0 = io_ptw_gstatus_spp; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_mpie_0 = io_ptw_gstatus_mpie; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_ube_0 = io_ptw_gstatus_ube; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_spie_0 = io_ptw_gstatus_spie; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_upie_0 = io_ptw_gstatus_upie; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_mie_0 = io_ptw_gstatus_mie; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_hie_0 = io_ptw_gstatus_hie; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_sie_0 = io_ptw_gstatus_sie; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_uie_0 = io_ptw_gstatus_uie; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_0_cfg_l_0 = io_ptw_pmp_0_cfg_l; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_pmp_0_cfg_a_0 = io_ptw_pmp_0_cfg_a; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_0_cfg_x_0 = io_ptw_pmp_0_cfg_x; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_0_cfg_w_0 = io_ptw_pmp_0_cfg_w; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_0_cfg_r_0 = io_ptw_pmp_0_cfg_r; // @[FrontendTLB.scala:29:7] wire [29:0] io_ptw_pmp_0_addr_0 = io_ptw_pmp_0_addr; // @[FrontendTLB.scala:29:7] wire [31:0] io_ptw_pmp_0_mask_0 = io_ptw_pmp_0_mask; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_1_cfg_l_0 = io_ptw_pmp_1_cfg_l; // @[FrontendTLB.scala:29:7] wire 
[1:0] io_ptw_pmp_1_cfg_a_0 = io_ptw_pmp_1_cfg_a; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_1_cfg_x_0 = io_ptw_pmp_1_cfg_x; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_1_cfg_w_0 = io_ptw_pmp_1_cfg_w; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_1_cfg_r_0 = io_ptw_pmp_1_cfg_r; // @[FrontendTLB.scala:29:7] wire [29:0] io_ptw_pmp_1_addr_0 = io_ptw_pmp_1_addr; // @[FrontendTLB.scala:29:7] wire [31:0] io_ptw_pmp_1_mask_0 = io_ptw_pmp_1_mask; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_2_cfg_l_0 = io_ptw_pmp_2_cfg_l; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_pmp_2_cfg_a_0 = io_ptw_pmp_2_cfg_a; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_2_cfg_x_0 = io_ptw_pmp_2_cfg_x; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_2_cfg_w_0 = io_ptw_pmp_2_cfg_w; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_2_cfg_r_0 = io_ptw_pmp_2_cfg_r; // @[FrontendTLB.scala:29:7] wire [29:0] io_ptw_pmp_2_addr_0 = io_ptw_pmp_2_addr; // @[FrontendTLB.scala:29:7] wire [31:0] io_ptw_pmp_2_mask_0 = io_ptw_pmp_2_mask; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_3_cfg_l_0 = io_ptw_pmp_3_cfg_l; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_pmp_3_cfg_a_0 = io_ptw_pmp_3_cfg_a; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_3_cfg_x_0 = io_ptw_pmp_3_cfg_x; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_3_cfg_w_0 = io_ptw_pmp_3_cfg_w; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_3_cfg_r_0 = io_ptw_pmp_3_cfg_r; // @[FrontendTLB.scala:29:7] wire [29:0] io_ptw_pmp_3_addr_0 = io_ptw_pmp_3_addr; // @[FrontendTLB.scala:29:7] wire [31:0] io_ptw_pmp_3_mask_0 = io_ptw_pmp_3_mask; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_4_cfg_l_0 = io_ptw_pmp_4_cfg_l; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_pmp_4_cfg_a_0 = io_ptw_pmp_4_cfg_a; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_4_cfg_x_0 = io_ptw_pmp_4_cfg_x; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_4_cfg_w_0 = io_ptw_pmp_4_cfg_w; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_4_cfg_r_0 = io_ptw_pmp_4_cfg_r; // @[FrontendTLB.scala:29:7] wire [29:0] io_ptw_pmp_4_addr_0 = io_ptw_pmp_4_addr; // @[FrontendTLB.scala:29:7] wire [31:0] io_ptw_pmp_4_mask_0 = io_ptw_pmp_4_mask; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_5_cfg_l_0 = io_ptw_pmp_5_cfg_l; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_pmp_5_cfg_a_0 = io_ptw_pmp_5_cfg_a; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_5_cfg_x_0 = io_ptw_pmp_5_cfg_x; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_5_cfg_w_0 = io_ptw_pmp_5_cfg_w; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_5_cfg_r_0 = io_ptw_pmp_5_cfg_r; // @[FrontendTLB.scala:29:7] wire [29:0] io_ptw_pmp_5_addr_0 = io_ptw_pmp_5_addr; // @[FrontendTLB.scala:29:7] wire [31:0] io_ptw_pmp_5_mask_0 = io_ptw_pmp_5_mask; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_6_cfg_l_0 = io_ptw_pmp_6_cfg_l; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_pmp_6_cfg_a_0 = io_ptw_pmp_6_cfg_a; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_6_cfg_x_0 = io_ptw_pmp_6_cfg_x; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_6_cfg_w_0 = io_ptw_pmp_6_cfg_w; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_6_cfg_r_0 = io_ptw_pmp_6_cfg_r; // @[FrontendTLB.scala:29:7] wire [29:0] io_ptw_pmp_6_addr_0 = io_ptw_pmp_6_addr; // @[FrontendTLB.scala:29:7] wire [31:0] io_ptw_pmp_6_mask_0 = io_ptw_pmp_6_mask; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_7_cfg_l_0 = io_ptw_pmp_7_cfg_l; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_pmp_7_cfg_a_0 = io_ptw_pmp_7_cfg_a; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_7_cfg_x_0 = io_ptw_pmp_7_cfg_x; // @[FrontendTLB.scala:29:7] wire io_ptw_pmp_7_cfg_w_0 = io_ptw_pmp_7_cfg_w; // 
@[FrontendTLB.scala:29:7] wire io_ptw_pmp_7_cfg_r_0 = io_ptw_pmp_7_cfg_r; // @[FrontendTLB.scala:29:7] wire [29:0] io_ptw_pmp_7_addr_0 = io_ptw_pmp_7_addr; // @[FrontendTLB.scala:29:7] wire [31:0] io_ptw_pmp_7_mask_0 = io_ptw_pmp_7_mask; // @[FrontendTLB.scala:29:7] wire io_ptw_customCSRs_csrs_0_ren_0 = io_ptw_customCSRs_csrs_0_ren; // @[FrontendTLB.scala:29:7] wire io_ptw_customCSRs_csrs_0_wen_0 = io_ptw_customCSRs_csrs_0_wen; // @[FrontendTLB.scala:29:7] wire [63:0] io_ptw_customCSRs_csrs_0_wdata_0 = io_ptw_customCSRs_csrs_0_wdata; // @[FrontendTLB.scala:29:7] wire [63:0] io_ptw_customCSRs_csrs_0_value_0 = io_ptw_customCSRs_csrs_0_value; // @[FrontendTLB.scala:29:7] wire io_ptw_customCSRs_csrs_1_ren_0 = io_ptw_customCSRs_csrs_1_ren; // @[FrontendTLB.scala:29:7] wire io_ptw_customCSRs_csrs_1_wen_0 = io_ptw_customCSRs_csrs_1_wen; // @[FrontendTLB.scala:29:7] wire [63:0] io_ptw_customCSRs_csrs_1_wdata_0 = io_ptw_customCSRs_csrs_1_wdata; // @[FrontendTLB.scala:29:7] wire [63:0] io_ptw_customCSRs_csrs_1_value_0 = io_ptw_customCSRs_csrs_1_value; // @[FrontendTLB.scala:29:7] wire io_ptw_customCSRs_csrs_2_ren_0 = io_ptw_customCSRs_csrs_2_ren; // @[FrontendTLB.scala:29:7] wire io_ptw_customCSRs_csrs_2_wen_0 = io_ptw_customCSRs_csrs_2_wen; // @[FrontendTLB.scala:29:7] wire [63:0] io_ptw_customCSRs_csrs_2_wdata_0 = io_ptw_customCSRs_csrs_2_wdata; // @[FrontendTLB.scala:29:7] wire [63:0] io_ptw_customCSRs_csrs_2_value_0 = io_ptw_customCSRs_csrs_2_value; // @[FrontendTLB.scala:29:7] wire io_ptw_customCSRs_csrs_3_ren_0 = io_ptw_customCSRs_csrs_3_ren; // @[FrontendTLB.scala:29:7] wire io_ptw_customCSRs_csrs_3_wen_0 = io_ptw_customCSRs_csrs_3_wen; // @[FrontendTLB.scala:29:7] wire [63:0] io_ptw_customCSRs_csrs_3_wdata_0 = io_ptw_customCSRs_csrs_3_wdata; // @[FrontendTLB.scala:29:7] wire [63:0] io_ptw_customCSRs_csrs_3_value_0 = io_ptw_customCSRs_csrs_3_value; // @[FrontendTLB.scala:29:7] wire io_exp_flush_retry_0 = io_exp_flush_retry; // @[FrontendTLB.scala:29:7] wire io_exp_flush_skip_0 = io_exp_flush_skip; // @[FrontendTLB.scala:29:7] wire io_counter_external_reset_0 = io_counter_external_reset; // @[FrontendTLB.scala:29:7] wire io_req_bits_tlb_req_passthrough = 1'h0; // @[FrontendTLB.scala:29:7] wire io_req_bits_tlb_req_v = 1'h0; // @[FrontendTLB.scala:29:7] wire io_resp_gpa_is_pte = 1'h0; // @[FrontendTLB.scala:29:7] wire io_resp_gf_ld = 1'h0; // @[FrontendTLB.scala:29:7] wire io_resp_gf_st = 1'h0; // @[FrontendTLB.scala:29:7] wire io_resp_gf_inst = 1'h0; // @[FrontendTLB.scala:29:7] wire io_resp_ma_ld = 1'h0; // @[FrontendTLB.scala:29:7] wire io_resp_ma_st = 1'h0; // @[FrontendTLB.scala:29:7] wire io_resp_ma_inst = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_req_bits_bits_vstage1 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_req_bits_bits_stage2 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_resp_bits_fragmented_superpage = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_status_mbe = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_status_sbe = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_status_sd_rv32 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_status_ube = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_status_upie = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_status_hie = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_status_uie = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_hstatus_vtsr = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_hstatus_vtw = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_hstatus_vtvm = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_hstatus_hu = 1'h0; // 
@[FrontendTLB.scala:29:7] wire io_ptw_hstatus_vsbe = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_sd_rv32 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_customCSRs_csrs_0_stall = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_customCSRs_csrs_0_set = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_customCSRs_csrs_1_stall = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_customCSRs_csrs_1_set = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_customCSRs_csrs_2_stall = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_customCSRs_csrs_2_set = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_customCSRs_csrs_3_stall = 1'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_customCSRs_csrs_3_set = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_0 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_1 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_2 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_3 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_4 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_5 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_6 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_7 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_8 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_9 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_10 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_11 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_12 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_13 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_14 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_18 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_19 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_20 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_21 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_22 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_23 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_24 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_25 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_26 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_27 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_28 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_29 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_30 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_31 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_32 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_33 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_34 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_35 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_36 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_37 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_38 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_39 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_40 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_41 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_42 = 1'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_43 = 1'h0; // @[FrontendTLB.scala:29:7] wire 
io_counter_event_signal_44 = 1'h0; // @[FrontendTLB.scala:29:7] wire [2:0] io_req_bits_tlb_req_size = 3'h0; // @[FrontendTLB.scala:29:7] wire [1:0] io_req_bits_tlb_req_prv = 2'h0; // @[FrontendTLB.scala:29:7] wire [1:0] io_resp_size = 2'h0; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_status_vs = 2'h0; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_hstatus_zero3 = 2'h0; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_hstatus_zero2 = 2'h0; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_pmp_0_cfg_res = 2'h0; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_pmp_1_cfg_res = 2'h0; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_pmp_2_cfg_res = 2'h0; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_pmp_3_cfg_res = 2'h0; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_pmp_4_cfg_res = 2'h0; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_pmp_5_cfg_res = 2'h0; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_pmp_6_cfg_res = 2'h0; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_pmp_7_cfg_res = 2'h0; // @[FrontendTLB.scala:29:7] wire [15:0] io_ptw_ptbr_asid = 16'h0; // @[FrontendTLB.scala:29:7] wire [15:0] io_ptw_hgatp_asid = 16'h0; // @[FrontendTLB.scala:29:7] wire [15:0] io_ptw_vsatp_asid = 16'h0; // @[FrontendTLB.scala:29:7] wire [3:0] io_ptw_hgatp_mode = 4'h0; // @[FrontendTLB.scala:29:7] wire [3:0] io_ptw_vsatp_mode = 4'h0; // @[FrontendTLB.scala:29:7] wire [43:0] io_ptw_hgatp_ppn = 44'h0; // @[FrontendTLB.scala:29:7] wire [43:0] io_ptw_vsatp_ppn = 44'h0; // @[FrontendTLB.scala:29:7] wire io_ptw_req_bits_valid = 1'h1; // @[FrontendTLB.scala:29:7] wire io_ptw_status_sd = 1'h1; // @[FrontendTLB.scala:29:7] wire io_ptw_gstatus_sd = 1'h1; // @[FrontendTLB.scala:29:7] wire [22:0] io_ptw_status_zero2 = 23'h0; // @[FrontendTLB.scala:29:7] wire [7:0] io_ptw_status_zero1 = 8'h0; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_status_xs = 2'h3; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_gstatus_xs = 2'h3; // @[FrontendTLB.scala:29:7] wire [29:0] io_ptw_hstatus_zero6 = 30'h0; // @[FrontendTLB.scala:29:7] wire [8:0] io_ptw_hstatus_zero5 = 9'h0; // @[FrontendTLB.scala:29:7] wire [5:0] io_ptw_hstatus_vgein = 6'h0; // @[FrontendTLB.scala:29:7] wire [4:0] io_ptw_hstatus_zero1 = 5'h0; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_status_sxl = 2'h2; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_status_uxl = 2'h2; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_hstatus_vsxl = 2'h2; // @[FrontendTLB.scala:29:7] wire [1:0] io_ptw_gstatus_uxl = 2'h2; // @[FrontendTLB.scala:29:7] wire [63:0] io_ptw_customCSRs_csrs_0_sdata = 64'h0; // @[FrontendTLB.scala:29:7] wire [63:0] io_ptw_customCSRs_csrs_1_sdata = 64'h0; // @[FrontendTLB.scala:29:7] wire [63:0] io_ptw_customCSRs_csrs_2_sdata = 64'h0; // @[FrontendTLB.scala:29:7] wire [63:0] io_ptw_customCSRs_csrs_3_sdata = 64'h0; // @[FrontendTLB.scala:29:7] wire [31:0] io_counter_external_values_0 = 32'h0; // @[FrontendTLB.scala:29:7] wire [31:0] io_counter_external_values_1 = 32'h0; // @[FrontendTLB.scala:29:7] wire [31:0] io_counter_external_values_2 = 32'h0; // @[FrontendTLB.scala:29:7] wire [31:0] io_counter_external_values_3 = 32'h0; // @[FrontendTLB.scala:29:7] wire [31:0] io_counter_external_values_4 = 32'h0; // @[FrontendTLB.scala:29:7] wire [31:0] io_counter_external_values_5 = 32'h0; // @[FrontendTLB.scala:29:7] wire [31:0] io_counter_external_values_6 = 32'h0; // @[FrontendTLB.scala:29:7] wire [31:0] io_counter_external_values_7 = 32'h0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_17_0 = io_req_valid_0; // @[FrontendTLB.scala:29:7] wire io_resp_pf_ld_0; // 
@[FrontendTLB.scala:29:7] wire io_resp_pf_st_0; // @[FrontendTLB.scala:29:7] wire io_resp_pf_inst_0; // @[FrontendTLB.scala:29:7] wire io_resp_ae_ld_0; // @[FrontendTLB.scala:29:7] wire io_resp_ae_st_0; // @[FrontendTLB.scala:29:7] wire io_resp_ae_inst_0; // @[FrontendTLB.scala:29:7] wire io_resp_miss_0; // @[FrontendTLB.scala:29:7] wire [31:0] io_resp_paddr_0; // @[FrontendTLB.scala:29:7] wire [39:0] io_resp_gpa_0; // @[FrontendTLB.scala:29:7] wire io_resp_cacheable_0; // @[FrontendTLB.scala:29:7] wire io_resp_must_alloc_0; // @[FrontendTLB.scala:29:7] wire io_resp_prefetchable_0; // @[FrontendTLB.scala:29:7] wire [4:0] io_resp_cmd_0; // @[FrontendTLB.scala:29:7] wire [26:0] io_ptw_req_bits_bits_addr_0; // @[FrontendTLB.scala:29:7] wire io_ptw_req_bits_bits_need_gpa_0; // @[FrontendTLB.scala:29:7] wire io_ptw_req_valid_0; // @[FrontendTLB.scala:29:7] wire io_exp_interrupt_0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_15_0; // @[FrontendTLB.scala:29:7] wire io_counter_event_signal_16_0; // @[FrontendTLB.scala:29:7] reg interrupt; // @[FrontendTLB.scala:43:26] assign io_exp_interrupt_0 = interrupt; // @[FrontendTLB.scala:29:7, :43:26] wire _tlb_io_sfence_valid_T = io_exp_flush_retry_0 | io_exp_flush_skip_0; // @[FrontendTLB.scala:25:49, :29:7] wire _exception_T = io_req_bits_tlb_req_cmd_0 == 5'h0; // @[FrontendTLB.scala:29:7, :62:63] wire _exception_T_1 = _tlb_io_resp_pf_ld | _tlb_io_resp_ae_ld; // @[FrontendTLB.scala:46:19, :62:92] wire _exception_T_2 = _tlb_io_resp_pf_st | _tlb_io_resp_ae_st; // @[FrontendTLB.scala:46:19, :62:132] wire _exception_T_3 = _exception_T ? _exception_T_1 : _exception_T_2; // @[FrontendTLB.scala:62:{38,63,92,132}] wire exception = io_req_valid_0 & _exception_T_3; // @[FrontendTLB.scala:29:7, :62:{32,38}]
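The exception wire at the end of this excerpt reproduces DecoupledTLB's fault handling: a valid request that sees a page fault or access exception (load faults when cmd is M_XRD, store faults otherwise) sets the sticky interrupt register, which is only cleared once the sfence triggered by flush_retry or flush_skip fires. A minimal sketch of that sticky-interrupt pattern (signal names are illustrative): import chisel3._

class StickyTlbInterrupt extends Module {
  val io = IO(new Bundle {
    val req_valid  = Input(Bool())
    val is_load    = Input(Bool())   // cmd === M_XRD in the real design
    val ld_fault   = Input(Bool())   // resp.pf.ld || resp.ae.ld
    val st_fault   = Input(Bool())   // resp.pf.st || resp.ae.st
    val flush_fire = Input(Bool())   // sfence accepted (flush_retry || flush_skip)
    val interrupt  = Output(Bool())
  })

  val interrupt = RegInit(false.B)
  val exception = io.req_valid && Mux(io.is_load, io.ld_fault, io.st_fault)

  when (exception)                  { interrupt := true.B }
  when (interrupt && io.flush_fire) { interrupt := false.B }

  io.interrupt := interrupt
}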
Generate the Verilog code corresponding to the following Chisel files. File INToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import consts._ class INToRecFN(intWidth: Int, expWidth: Int, sigWidth: Int) extends RawModule { override def desiredName = s"INToRecFN_i${intWidth}_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val signedIn = Input(Bool()) val in = Input(Bits(intWidth.W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val intAsRawFloat = rawFloatFromIN(io.signedIn, io.in); val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( intAsRawFloat.expWidth, intWidth, expWidth, sigWidth, flRoundOpt_sigMSBitAlwaysZero | flRoundOpt_neverUnderflows )) roundAnyRawFNToRecFN.io.invalidExc := false.B roundAnyRawFNToRecFN.io.infiniteExc := false.B roundAnyRawFNToRecFN.io.in := intAsRawFloat roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags } File primitives.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). 
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object lowMask { def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt = { require(topBound != bottomBound) val numInVals = BigInt(1)<<in.getWidth if (topBound < bottomBound) { lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound) } else if (numInVals > 64 /* Empirical */) { // For simulation performance, we should avoid generating // exteremely wide shifters, so we divide and conquer. // Empirically, this does not impact synthesis QoR. 
val mid = numInVals / 2 val msb = in(in.getWidth - 1) val lsbs = in(in.getWidth - 2, 0) if (mid < topBound) { if (mid <= bottomBound) { Mux(msb, lowMask(lsbs, topBound - mid, bottomBound - mid), 0.U ) } else { Mux(msb, lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U, lowMask(lsbs, mid, bottomBound) ) } } else { ~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound)) } } else { val shift = (BigInt(-1)<<numInVals.toInt).S>>in Reverse( shift( (numInVals - 1 - bottomBound).toInt, (numInVals - topBound).toInt ) ) } } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object countLeadingZeros { def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse) } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object orReduceBy2 { def apply(in: UInt): UInt = { val reducedWidth = (in.getWidth + 1)>>1 val reducedVec = Wire(Vec(reducedWidth, Bool())) for (ix <- 0 until reducedWidth - 1) { reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR } reducedVec(reducedWidth - 1) := in(in.getWidth - 1, (reducedWidth - 1) * 2).orR reducedVec.asUInt } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object orReduceBy4 { def apply(in: UInt): UInt = { val reducedWidth = (in.getWidth + 3)>>2 val reducedVec = Wire(Vec(reducedWidth, Bool())) for (ix <- 0 until reducedWidth - 1) { reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR } reducedVec(reducedWidth - 1) := in(in.getWidth - 1, (reducedWidth - 1) * 4).orR reducedVec.asUInt } } File rawFloatFromIN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ object rawFloatFromIN { def apply(signedIn: Bool, in: Bits): RawFloat = { val expWidth = log2Up(in.getWidth) + 1 //*** CHANGE THIS; CAN BE VERY LARGE: val extIntWidth = 1<<(expWidth - 1) val sign = signedIn && in(in.getWidth - 1) val absIn = Mux(sign, -in.asUInt, in.asUInt) val extAbsIn = (0.U(extIntWidth.W) ## absIn)(extIntWidth - 1, 0) val adjustedNormDist = countLeadingZeros(extAbsIn) val sig = (extAbsIn<<adjustedNormDist)( extIntWidth - 1, extIntWidth - in.getWidth) val out = Wire(new RawFloat(expWidth, in.getWidth)) out.isNaN := false.B out.isInf := false.B out.isZero := ! sig(in.getWidth - 1) out.sign := sign out.sExp := (2.U(2.W) ## ~adjustedNormDist(expWidth - 2, 0)).zext out.sig := sig out } }
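rawFloatFromIN converts an integer to a raw (unrounded) float in four steps: take the absolute value when the input is signed and negative, zero-extend it to a power-of-two width, count the leading zeros, then left-shift by that count so the significand is left-aligned while the complemented leading-zero count is folded into the exponent. A minimal sketch that simply exposes those fields for inspection, assuming the hardfloat sources above are available via import hardfloat._; the module and port names are illustrative: import chisel3._
import chisel3.util._
import hardfloat._

class IntToRawFloatProbe(intWidth: Int = 8) extends RawModule {
  val expWidth = log2Up(intWidth) + 1           // same derivation as rawFloatFromIN
  val io = IO(new Bundle {
    val signedIn = Input(Bool())
    val in       = Input(UInt(intWidth.W))
    val isZero   = Output(Bool())
    val sign     = Output(Bool())
    val sExp     = Output(SInt((expWidth + 2).W))
    val sig      = Output(UInt((intWidth + 1).W))
  })

  val raw = rawFloatFromIN(io.signedIn, io.in)  // purely combinational
  io.isZero := raw.isZero
  io.sign   := raw.sign
  io.sExp   := raw.sExp
  io.sig    := raw.sig
}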
module INToRecFN_i1_e8_s24(); // @[INToRecFN.scala:43:7] wire [1:0] _intAsRawFloat_absIn_T = 2'h3; // @[rawFloatFromIN.scala:52:31] wire [2:0] _intAsRawFloat_extAbsIn_T = 3'h1; // @[rawFloatFromIN.scala:53:44] wire [2:0] _intAsRawFloat_sig_T = 3'h2; // @[rawFloatFromIN.scala:56:22] wire [2:0] _intAsRawFloat_out_sExp_T_2 = 3'h4; // @[rawFloatFromIN.scala:64:33] wire [3:0] intAsRawFloat_sExp = 4'h4; // @[rawFloatFromIN.scala:59:23, :64:72] wire [3:0] _intAsRawFloat_out_sExp_T_3 = 4'h4; // @[rawFloatFromIN.scala:59:23, :64:72] wire [1:0] intAsRawFloat_extAbsIn = 2'h1; // @[rawFloatFromIN.scala:53:53, :59:23, :65:20] wire [1:0] intAsRawFloat_sig = 2'h1; // @[rawFloatFromIN.scala:53:53, :59:23, :65:20] wire [4:0] io_exceptionFlags = 5'h0; // @[INToRecFN.scala:43:7, :46:16, :60:15] wire [32:0] io_out = 33'h80000000; // @[INToRecFN.scala:43:7, :46:16, :60:15] wire [2:0] io_roundingMode = 3'h0; // @[INToRecFN.scala:43:7, :46:16, :60:15] wire io_in = 1'h1; // @[Mux.scala:50:70] wire io_detectTininess = 1'h1; // @[Mux.scala:50:70] wire _intAsRawFloat_sign_T = 1'h1; // @[Mux.scala:50:70] wire _intAsRawFloat_absIn_T_1 = 1'h1; // @[Mux.scala:50:70] wire intAsRawFloat_absIn = 1'h1; // @[Mux.scala:50:70] wire _intAsRawFloat_adjustedNormDist_T = 1'h1; // @[Mux.scala:50:70] wire intAsRawFloat_adjustedNormDist = 1'h1; // @[Mux.scala:50:70] wire intAsRawFloat_sig_0 = 1'h1; // @[Mux.scala:50:70] wire _intAsRawFloat_out_isZero_T = 1'h1; // @[Mux.scala:50:70] wire _intAsRawFloat_out_sExp_T = 1'h1; // @[Mux.scala:50:70] wire io_signedIn = 1'h0; // @[INToRecFN.scala:43:7] wire intAsRawFloat_sign = 1'h0; // @[rawFloatFromIN.scala:51:29] wire _intAsRawFloat_adjustedNormDist_T_1 = 1'h0; // @[primitives.scala:91:52] wire intAsRawFloat_isNaN = 1'h0; // @[rawFloatFromIN.scala:59:23] wire intAsRawFloat_isInf = 1'h0; // @[rawFloatFromIN.scala:59:23] wire intAsRawFloat_isZero = 1'h0; // @[rawFloatFromIN.scala:59:23] wire intAsRawFloat_sign_0 = 1'h0; // @[rawFloatFromIN.scala:59:23] wire _intAsRawFloat_out_isZero_T_1 = 1'h0; // @[rawFloatFromIN.scala:62:23] wire _intAsRawFloat_out_sExp_T_1 = 1'h0; // @[rawFloatFromIN.scala:64:36] RoundAnyRawFNToRecFN_ie2_is1_oe8_os24 roundAnyRawFNToRecFN (); // @[INToRecFN.scala:60:15] endmodule
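In this particular instance intWidth is 1 and every control is tied to a constant (signedIn = 0, in = 1, roundingMode = 0, i.e. round-to-nearest-even), so the whole converter constant-folds: converting the integer 1 is exact, the output is fixed at 33'h80000000 (the recoded encoding of +1.0) with zero exception flags, and the module is left with an empty port list. For a non-degenerate width, a wrapper along these lines (parameter choices and names are illustrative, not from the source) shows how the generic INToRecFN would typically be driven: import chisel3._
import hardfloat._
import hardfloat.consts._

class Int32ToRecF32 extends Module {
  val io = IO(new Bundle {
    val in    = Input(SInt(32.W))
    val out   = Output(UInt(33.W))   // recoded single precision (expWidth 8, sigWidth 24)
    val flags = Output(UInt(5.W))
  })

  val conv = Module(new INToRecFN(intWidth = 32, expWidth = 8, sigWidth = 24))
  conv.io.signedIn       := true.B
  conv.io.in             := io.in.asUInt
  conv.io.roundingMode   := round_near_even
  conv.io.detectTininess := tininess_afterRounding
  io.out   := conv.io.out
  io.flags := conv.io.exceptionFlags
}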
Generate the Verilog code corresponding to the following Chisel files. File UnsafeAXI4ToTL.scala: package ara import chisel3._ import chisel3.util._ import freechips.rocketchip.amba._ import freechips.rocketchip.amba.axi4._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle { val data = UInt(dataWidth.W) val resp = UInt(respWidth.W) val last = Bool() val user = BundleMap(userFields) } /** Parameters for [[BaseReservableListBuffer]] and all child classes. * * @param numEntries Total number of elements that can be stored in the 'data' RAM * @param numLists Maximum number of linked lists * @param numBeats Maximum number of beats per entry */ case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) { // Avoid zero-width wires when we call 'log2Ceil' val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries) val listBits = if (numLists == 1) 1 else log2Ceil(numLists) val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats) } case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName) extends MixedAdapterNode(AXI4Imp, TLImp)( dFn = { case mp => TLMasterPortParameters.v2( masters = mp.masters.zipWithIndex.map { case (m, i) => // Support 'numTlTxns' read requests and 'numTlTxns' write requests at once. val numSourceIds = numTlTxns * 2 TLMasterParameters.v2( name = m.name, sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds), nodePath = m.nodePath ) }, echoFields = mp.echoFields, requestFields = AMBAProtField() +: mp.requestFields, responseKeys = mp.responseKeys ) }, uFn = { mp => AXI4SlavePortParameters( slaves = mp.managers.map { m => val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits)) AXI4SlaveParameters( address = m.address, resources = m.resources, regionType = m.regionType, executable = m.executable, nodePath = m.nodePath, supportsWrite = m.supportsPutPartial.intersect(maxXfer), supportsRead = m.supportsGet.intersect(maxXfer), interleavedId = Some(0) // TL2 never interleaves D beats ) }, beatBytes = mp.beatBytes, minLatency = mp.minLatency, responseFields = mp.responseFields, requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt) ) } ) class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule { require(numTlTxns >= 1) require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2") val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => edgeIn.master.masters.foreach { m => require(m.aligned, "AXI4ToTL requires aligned requests") } val numIds = edgeIn.master.endId val beatBytes = edgeOut.slave.beatBytes val maxTransfer = edgeOut.slave.maxTransfer val maxBeats = maxTransfer / beatBytes // Look for an Error device to redirect bad requests val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError") require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. 
One must be instantiated.") val errorDev = errorDevs.maxBy(_.maxTransfer) val errorDevAddr = errorDev.address.head.base require( errorDev.supportsPutPartial.contains(maxTransfer), s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer" ) require( errorDev.supportsGet.contains(maxTransfer), s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer" ) // All of the read-response reordering logic. val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields) val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats) val listBuffer = if (numTlTxns > 1) { Module(new ReservableListBuffer(listBufData, listBufParams)) } else { Module(new PassthroughListBuffer(listBufData, listBufParams)) } // To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to // 0 for read requests and 1 for write requests. val isReadSourceBit = 0.U(1.W) val isWriteSourceBit = 1.U(1.W) /* Read request logic */ val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val rBytes1 = in.ar.bits.bytes1() val rSize = OH1ToUInt(rBytes1) val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize) val rId = if (numTlTxns > 1) { Cat(isReadSourceBit, listBuffer.ioReservedIndex) } else { isReadSourceBit } val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Indicates if there are still valid TileLink source IDs left to use. val canIssueR = listBuffer.ioReserve.ready listBuffer.ioReserve.bits := in.ar.bits.id listBuffer.ioReserve.valid := in.ar.valid && rOut.ready in.ar.ready := rOut.ready && canIssueR rOut.valid := in.ar.valid && canIssueR rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2 rOut.bits.user :<= in.ar.bits.user rOut.bits.user.lift(AMBAProt).foreach { rProt => rProt.privileged := in.ar.bits.prot(0) rProt.secure := !in.ar.bits.prot(1) rProt.fetch := in.ar.bits.prot(2) rProt.bufferable := in.ar.bits.cache(0) rProt.modifiable := in.ar.bits.cache(1) rProt.readalloc := in.ar.bits.cache(2) rProt.writealloc := in.ar.bits.cache(3) } /* Write request logic */ // Strip off the MSB, which identifies the transaction as read vs write. val strippedResponseSourceId = if (numTlTxns > 1) { out.d.bits.source((out.d.bits.source).getWidth - 2, 0) } else { // When there's only 1 TileLink transaction allowed for read/write, then this field is always 0. 0.U(1.W) } // Track when a write request burst is in progress. val writeBurstBusy = RegInit(false.B) when(in.w.fire) { writeBurstBusy := !in.w.bits.last } val usedWriteIds = RegInit(0.U(numTlTxns.W)) val canIssueW = !usedWriteIds.andR val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W)) val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W)) usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet // Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't // change mid-burst. 
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W)) val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy val freeWriteIdIndex = OHToUInt(freeWriteIdOH) freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val wBytes1 = in.aw.bits.bytes1() val wSize = OH1ToUInt(wBytes1) val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize) val wId = if (numTlTxns > 1) { Cat(isWriteSourceBit, freeWriteIdIndex) } else { isWriteSourceBit } val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain // asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but // the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb // bits during a W-channel burst. in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW in.w.ready := wOut.ready && in.aw.valid && canIssueW wOut.valid := in.aw.valid && in.w.valid && canIssueW wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2 in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ } wOut.bits.user :<= in.aw.bits.user wOut.bits.user.lift(AMBAProt).foreach { wProt => wProt.privileged := in.aw.bits.prot(0) wProt.secure := !in.aw.bits.prot(1) wProt.fetch := in.aw.bits.prot(2) wProt.bufferable := in.aw.bits.cache(0) wProt.modifiable := in.aw.bits.cache(1) wProt.readalloc := in.aw.bits.cache(2) wProt.writealloc := in.aw.bits.cache(3) } // Merge the AXI4 read/write requests into the TL-A channel. TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut)) /* Read/write response logic */ val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle))) val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle))) val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY) val dHasData = edgeOut.hasData(out.d.bits) val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d) val dNumBeats1 = edgeOut.numBeats1(out.d.bits) // Handle cases where writeack arrives before write is done val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck) listBuffer.ioDataOut.ready := okR.ready okR.valid := listBuffer.ioDataOut.valid okB.valid := out.d.valid && !dHasData && !writeEarlyAck listBuffer.ioResponse.valid := out.d.valid && dHasData listBuffer.ioResponse.bits.index := strippedResponseSourceId listBuffer.ioResponse.bits.data.data := out.d.bits.data listBuffer.ioResponse.bits.data.resp := dResp listBuffer.ioResponse.bits.data.last := dLast listBuffer.ioResponse.bits.data.user :<= out.d.bits.user listBuffer.ioResponse.bits.count := dCount listBuffer.ioResponse.bits.numBeats1 := dNumBeats1 okR.bits.id := listBuffer.ioDataOut.bits.listIndex okR.bits.data := listBuffer.ioDataOut.bits.payload.data okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp okR.bits.last := listBuffer.ioDataOut.bits.payload.last okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user // Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write // response, mark the write transaction as complete. 
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W)) val writeResponseId = writeIdMap.read(strippedResponseSourceId) when(wOut.fire) { writeIdMap.write(freeWriteIdIndex, in.aw.bits.id) } when(edgeOut.done(wOut)) { usedWriteIdsSet := freeWriteIdOH } when(okB.fire) { usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns) } okB.bits.id := writeResponseId okB.bits.resp := dResp okB.bits.user :<= out.d.bits.user // AXI4 needs irrevocable behaviour in.r <> Queue.irrevocable(okR, 1, flow = true) in.b <> Queue.irrevocable(okB, 1, flow = true) // Unused channels out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B /* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */ def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = { val lReqType = reqType.toLowerCase when(a.valid) { assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U) // Narrow transfers and FIXED bursts must be single-beat bursts. when(a.bits.len =/= 0.U) { assert( a.bits.size === log2Ceil(beatBytes).U, s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)", 1.U << a.bits.size, a.bits.len + 1.U ) assert( a.bits.burst =/= AXI4Parameters.BURST_FIXED, s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)", a.bits.len + 1.U ) } // Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in // particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink // Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts. } } checkRequest(in.ar, "Read") checkRequest(in.aw, "Write") } } } object UnsafeAXI4ToTL { def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = { val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt)) axi42tl.node } } /* ReservableListBuffer logic, and associated classes. */ class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle { val index = UInt(params.entryBits.W) val count = UInt(params.beatBits.W) val numBeats1 = UInt(params.beatBits.W) } class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle { val listIndex = UInt(params.listBits.W) } /** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */ abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends Module { require(params.numEntries > 0) require(params.numLists > 0) val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W)))) val ioReservedIndex = IO(Output(UInt(params.entryBits.W))) val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params)))) val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params))) } /** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve * linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the * 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a * given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order. 
* * ==Constructor== * @param gen Chisel type of linked list data element * @param params Other parameters * * ==Module IO== * @param ioReserve Index of list to reserve a new element in * @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire' * @param ioResponse Payload containing response data and linked-list-entry index * @param ioDataOut Payload containing data read from response linked list and linked list index */ class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { val valid = RegInit(0.U(params.numLists.W)) val head = Mem(params.numLists, UInt(params.entryBits.W)) val tail = Mem(params.numLists, UInt(params.entryBits.W)) val used = RegInit(0.U(params.numEntries.W)) val next = Mem(params.numEntries, UInt(params.entryBits.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) } val dataIsPresent = RegInit(0.U(params.numEntries.W)) val beats = Mem(params.numEntries, UInt(params.beatBits.W)) // The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower. val dataMemReadEnable = WireDefault(false.B) val dataMemWriteEnable = WireDefault(false.B) assert(!(dataMemReadEnable && dataMemWriteEnable)) // 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the // lowest-index entry in the 'data' RAM which is free. val freeOH = Wire(UInt(params.numEntries.W)) val freeIndex = OHToUInt(freeOH) freeOH := ~(leftOR(~used) << 1) & ~used ioReservedIndex := freeIndex val validSet = WireDefault(0.U(params.numLists.W)) val validClr = WireDefault(0.U(params.numLists.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) val dataIsPresentSet = WireDefault(0.U(params.numEntries.W)) val dataIsPresentClr = WireDefault(0.U(params.numEntries.W)) valid := (valid & ~validClr) | validSet used := (used & ~usedClr) | usedSet dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet /* Reservation logic signals */ val reserveTail = Wire(UInt(params.entryBits.W)) val reserveIsValid = Wire(Bool()) /* Response logic signals */ val responseIndex = Wire(UInt(params.entryBits.W)) val responseListIndex = Wire(UInt(params.listBits.W)) val responseHead = Wire(UInt(params.entryBits.W)) val responseTail = Wire(UInt(params.entryBits.W)) val nextResponseHead = Wire(UInt(params.entryBits.W)) val nextDataIsPresent = Wire(Bool()) val isResponseInOrder = Wire(Bool()) val isEndOfList = Wire(Bool()) val isLastBeat = Wire(Bool()) val isLastResponseBeat = Wire(Bool()) val isLastUnwindBeat = Wire(Bool()) /* Reservation logic */ reserveTail := tail.read(ioReserve.bits) reserveIsValid := valid(ioReserve.bits) ioReserve.ready := !used.andR // When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we // actually start a new list, rather than appending to a list that's about to disappear. 
val reserveResponseSameList = ioReserve.bits === responseListIndex val appendToAndDestroyList = ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat when(ioReserve.fire) { validSet := UIntToOH(ioReserve.bits, params.numLists) usedSet := freeOH when(reserveIsValid && !appendToAndDestroyList) { next.write(reserveTail, freeIndex) }.otherwise { head.write(ioReserve.bits, freeIndex) } tail.write(ioReserve.bits, freeIndex) map.write(freeIndex, ioReserve.bits) } /* Response logic */ // The majority of the response logic (reading from and writing to the various RAMs) is common between the // response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid). // The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the // 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and // response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after // two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker. responseHead := head.read(responseListIndex) responseTail := tail.read(responseListIndex) nextResponseHead := next.read(responseIndex) nextDataIsPresent := dataIsPresent(nextResponseHead) // Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since // there isn't a next element in the linked list. isResponseInOrder := responseHead === responseIndex isEndOfList := responseHead === responseTail isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1 // When a response's last beat is sent to the output channel, mark it as completed. This can happen in two // situations: // 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM // reservation was never needed. // 2. An entry is read out of the 'data' SRAM (within the unwind FSM). when(ioDataOut.fire && isLastBeat) { // Mark the reservation as no-longer-used. usedClr := UIntToOH(responseIndex, params.numEntries) // If the response is in-order, then we're popping an element from this linked list. when(isEndOfList) { // Once we pop the last element from a linked list, mark it as no-longer-present. validClr := UIntToOH(responseListIndex, params.numLists) }.otherwise { // Move the linked list's head pointer to the new head pointer. head.write(responseListIndex, nextResponseHead) } } // If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding. when(ioResponse.fire && !isResponseInOrder) { dataMemWriteEnable := true.B when(isLastResponseBeat) { dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries) beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1) } } // Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to. 
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats) (responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) => when(select && dataMemWriteEnable) { seqMem.write(ioResponse.bits.index, ioResponse.bits.data) } } /* Response unwind logic */ // Unwind FSM state definitions val sIdle :: sUnwinding :: Nil = Enum(2) val unwindState = RegInit(sIdle) val busyUnwinding = unwindState === sUnwinding val startUnwind = Wire(Bool()) val stopUnwind = Wire(Bool()) when(startUnwind) { unwindState := sUnwinding }.elsewhen(stopUnwind) { unwindState := sIdle } assert(!(startUnwind && stopUnwind)) // Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to // become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is // invalid. // // Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to // worry about overwriting the 'data' SRAM's output when we start the unwind FSM. startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent // Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of // two things happens: // 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent) // 2. There are no more outstanding responses in this list (isEndOfList) // // Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are // passing from 'ioResponse' to 'ioDataOut'. stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList) val isUnwindBurstOver = Wire(Bool()) val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable) // Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of // beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we // increment 'beatCounter' until it reaches 'unwindBeats1'. val unwindBeats1 = Reg(UInt(params.beatBits.W)) val nextBeatCounter = Wire(UInt(params.beatBits.W)) val beatCounter = RegNext(nextBeatCounter) isUnwindBurstOver := beatCounter === unwindBeats1 when(startNewBurst) { unwindBeats1 := beats.read(nextResponseHead) nextBeatCounter := 0.U }.elsewhen(dataMemReadEnable) { nextBeatCounter := beatCounter + 1.U }.otherwise { nextBeatCounter := beatCounter } // When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next // entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which // happens at the start of reading a new stored burst). val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst) responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index) // Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the // SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead // holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'. val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex) // The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. 
The SRAM's output stays valid // until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle). val unwindDataIsValid = RegInit(false.B) when(dataMemReadEnable) { unwindDataIsValid := true.B }.elsewhen(ioDataOut.fire) { unwindDataIsValid := false.B } isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid // Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats. isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat) // Select which SRAM to read from based on the beat counter. val dataOutputVec = Wire(Vec(params.numBeats, gen)) val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats) (nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) => dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable) } // Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured // by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading // from. val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable) // Mark 'data' burst entries as no-longer-present as they get read out of the SRAM. when(dataMemReadEnable) { dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries) } // As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue // a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know // we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be // consumed by the output channel). val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem) // While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need // 'responseListIndex' to be coherent for the entire unwind process. val rawResponseListIndex = map.read(responseIndex) val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst) responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex) // Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are // just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that // could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be // single-ported. ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding // Either pass an in-order response to the output channel, or data read from the unwind FSM. ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder) ioDataOut.bits.listIndex := responseListIndex ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data) // It's an error to get a response that isn't associated with a valid linked list. when(ioResponse.fire || unwindDataIsValid) { assert( valid(responseListIndex), "No linked list exists at index %d, mapped from %d", responseListIndex, responseIndex ) } when(busyUnwinding && dataMemReadEnable) { assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order") } } /** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1. 
* * Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to * reorder any responses, or store any linked lists. */ class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1") val used = RegInit(0.U(params.numEntries.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) used := (used & ~usedClr) | usedSet ioReserve.ready := used === 0.U // Store which list index was reserved, we need to return this value when we get a response. when(ioReserve.fire) { usedSet := 1.U map.write(0.U, ioReserve.bits) } // There's only one valid linked list entry, which is at index 0. ioReservedIndex := 0.U val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1 // Mark the linked list as empty when we get the last beat in a response. // Note that 'ioResponse.fire === ioDataOut.fire'. when(ioResponse.fire && isLastResponseBeat) { usedClr := 1.U } // Always pass the response data straight through, since we never need to reorder the response data. ioDataOut.bits.listIndex := map.read(0.U) ioDataOut.bits.payload := ioResponse.bits.data ioDataOut.valid := ioResponse.valid ioResponse.ready := ioDataOut.ready }
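Before the generated memory wrapper below, a small illustrative sketch (not from the original sources) of the source-ID scheme described in the adapter above: the MSB of the TileLink source distinguishes writes (1) from reads (0), and the response path strips that bit back off to recover the transaction index (the strippedResponseSourceId logic). The module name TlSourceIdCodec and its ports are hypothetical.

import chisel3._
import chisel3.util._

// Hypothetical helper, for illustration only.
class TlSourceIdCodec(numTlTxns: Int) extends Module {
  require(isPow2(numTlTxns) && numTlTxns > 1)
  private val idxBits = log2Ceil(numTlTxns)
  val io = IO(new Bundle {
    val txnIndex    = Input(UInt(idxBits.W))         // index into the read/write trackers
    val isWrite     = Input(Bool())                  // 1 for writes, 0 for reads
    val source      = Output(UInt((idxBits + 1).W))  // TileLink 'source' to emit
    val respSource  = Input(UInt((idxBits + 1).W))   // 'source' echoed on the D channel
    val respIndex   = Output(UInt(idxBits.W))
    val respIsWrite = Output(Bool())
  })
  io.source      := Cat(io.isWrite, io.txnIndex)   // prepend the read/write bit
  io.respIndex   := io.respSource(idxBits - 1, 0)  // strip the MSB, as strippedResponseSourceId does
  io.respIsWrite := io.respSource(idxBits)
}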
module dataMems_203( // @[UnsafeAXI4ToTL.scala:365:62] input [4:0] R0_addr, input R0_en, input R0_clk, output [66:0] R0_data, input [4:0] W0_addr, input W0_en, input W0_clk, input [66:0] W0_data ); dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62] .R0_addr (R0_addr), .R0_en (R0_en), .R0_clk (R0_clk), .R0_data (R0_data), .W0_addr (W0_addr), .W0_en (W0_en), .W0_clk (W0_clk), .W0_data (W0_data) ); // @[UnsafeAXI4ToTL.scala:365:62] endmodule
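Both the adapter (freeWriteIdOHRaw) and the ReservableListBuffer (freeOH) pick the lowest free entry with the expression ~(leftOR(~used) << 1) & ~used, and the adapter additionally holds the chosen write ID stable with holdUnless while a W burst is in flight. The sketch below is not from the original sources (the module name and ports are hypothetical) and assumes that chisel3.util.PriorityEncoderOH(~used) produces the same lowest-set-bit one-hot value as that expression.

import chisel3._
import chisel3.util._
import freechips.rocketchip.util._ // provides holdUnless, as used in the sources above

// Illustration only: allocate the lowest free ID as a one-hot value, and freeze the
// selection while a burst is in flight so the ID cannot change mid-burst.
class LowestFreeIdAllocator(numIds: Int) extends Module {
  require(numIds > 1)
  val io = IO(new Bundle {
    val usedIds   = Input(UInt(numIds.W))  // bit i set means ID i is in use
    val burstBusy = Input(Bool())          // hold the current selection while high
    val freeIdOH  = Output(UInt(numIds.W))
    val freeIndex = Output(UInt(log2Ceil(numIds).W))
  })
  // One-hot of the least-significant cleared bit of usedIds; only meaningful when at
  // least one ID is free (the adapter gates issue with !usedWriteIds.andR).
  val freeIdOHRaw = PriorityEncoderOH(~io.usedIds)
  val freeIdOH    = freeIdOHRaw holdUnless !io.burstBusy
  io.freeIdOH  := freeIdOH
  io.freeIndex := OHToUInt(freeIdOH)
}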
Generate the Verilog code corresponding to the following Chisel files. File ShiftRegisterPriorityQueue.scala: package compressacc import chisel3._ import chisel3.util._ import chisel3.util._ // TODO : support enq & deq at the same cycle class PriorityQueueStageIO(keyWidth: Int, value: ValueInfo) extends Bundle { val output_prev = KeyValue(keyWidth, value) val output_nxt = KeyValue(keyWidth, value) val input_prev = Flipped(KeyValue(keyWidth, value)) val input_nxt = Flipped(KeyValue(keyWidth, value)) val cmd = Flipped(Valid(UInt(1.W))) val insert_here = Input(Bool()) val cur_input_keyval = Flipped(KeyValue(keyWidth, value)) val cur_output_keyval = KeyValue(keyWidth, value) } class PriorityQueueStage(keyWidth: Int, value: ValueInfo) extends Module { val io = IO(new PriorityQueueStageIO(keyWidth, value)) dontTouch(io) val CMD_DEQ = 0.U val CMD_ENQ = 1.U val MAX_VALUE = (1 << keyWidth) - 1 val key_reg = RegInit(MAX_VALUE.U(keyWidth.W)) val value_reg = Reg(value) io.output_prev.key := key_reg io.output_prev.value := value_reg io.output_nxt.key := key_reg io.output_nxt.value := value_reg io.cur_output_keyval.key := key_reg io.cur_output_keyval.value := value_reg when (io.cmd.valid) { switch (io.cmd.bits) { is (CMD_DEQ) { key_reg := io.input_nxt.key value_reg := io.input_nxt.value } is (CMD_ENQ) { when (io.insert_here) { key_reg := io.cur_input_keyval.key value_reg := io.cur_input_keyval.value } .elsewhen (key_reg >= io.cur_input_keyval.key) { key_reg := io.input_prev.key value_reg := io.input_prev.value } .otherwise { // do nothing } } } } } object PriorityQueueStage { def apply(keyWidth: Int, v: ValueInfo): PriorityQueueStage = new PriorityQueueStage(keyWidth, v) } // TODO // - This design is not scalable as the enqued_keyval is broadcasted to all the stages // - Add pipeline registers later class PriorityQueueIO(queSize: Int, keyWidth: Int, value: ValueInfo) extends Bundle { val cnt_bits = log2Ceil(queSize+1) val counter = Output(UInt(cnt_bits.W)) val enq = Flipped(Decoupled(KeyValue(keyWidth, value))) val deq = Decoupled(KeyValue(keyWidth, value)) } class PriorityQueue(queSize: Int, keyWidth: Int, value: ValueInfo) extends Module { val keyWidthInternal = keyWidth + 1 val CMD_DEQ = 0.U val CMD_ENQ = 1.U val io = IO(new PriorityQueueIO(queSize, keyWidthInternal, value)) dontTouch(io) val MAX_VALUE = ((1 << keyWidthInternal) - 1).U val cnt_bits = log2Ceil(queSize+1) // do not consider cases where we are inserting more entries then the queSize val counter = RegInit(0.U(cnt_bits.W)) io.counter := counter val full = (counter === queSize.U) val empty = (counter === 0.U) io.deq.valid := !empty io.enq.ready := !full when (io.enq.fire) { counter := counter + 1.U } when (io.deq.fire) { counter := counter - 1.U } val cmd_valid = io.enq.valid || io.deq.ready val cmd = Mux(io.enq.valid, CMD_ENQ, CMD_DEQ) assert(!(io.enq.valid && io.deq.ready)) val stages = Seq.fill(queSize)(Module(new PriorityQueueStage(keyWidthInternal, value))) for (i <- 0 until (queSize - 1)) { stages(i+1).io.input_prev <> stages(i).io.output_nxt stages(i).io.input_nxt <> stages(i+1).io.output_prev } stages(queSize-1).io.input_nxt.key := MAX_VALUE // stages(queSize-1).io.input_nxt.value := stages(queSize-1).io.input_nxt.value.symbol := 0.U // stages(queSize-1).io.input_nxt.value.child(0) := 0.U // stages(queSize-1).io.input_nxt.value.child(1) := 0.U stages(0).io.input_prev.key := io.enq.bits.key stages(0).io.input_prev.value <> io.enq.bits.value for (i <- 0 until queSize) { stages(i).io.cmd.valid := cmd_valid stages(i).io.cmd.bits := cmd 
stages(i).io.cur_input_keyval <> io.enq.bits } val is_large_or_equal = WireInit(VecInit(Seq.fill(queSize)(false.B))) for (i <- 0 until queSize) { is_large_or_equal(i) := (stages(i).io.cur_output_keyval.key >= io.enq.bits.key) } val is_large_or_equal_cat = Wire(UInt(queSize.W)) is_large_or_equal_cat := Cat(is_large_or_equal.reverse) val insert_here_idx = PriorityEncoder(is_large_or_equal_cat) for (i <- 0 until queSize) { when (i.U === insert_here_idx) { stages(i).io.insert_here := true.B } .otherwise { stages(i).io.insert_here := false.B } } io.deq.bits <> stages(0).io.output_prev }
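For illustration (not part of the compressacc sources), the insertion-position computation above can be factored into a small stand-alone module: the enqueued key is compared against every stage's stored key, and the new element is inserted at the first (lowest-index) stage whose key is greater than or equal to it. The module name InsertPositionFinder is hypothetical.

import chisel3._
import chisel3.util._

// Hypothetical stand-alone version of the insert_here selection in PriorityQueue.
class InsertPositionFinder(queSize: Int, keyWidth: Int) extends Module {
  require(queSize > 1)
  val io = IO(new Bundle {
    val storedKeys = Input(Vec(queSize, UInt(keyWidth.W))) // stages(i).io.cur_output_keyval.key
    val enqKey     = Input(UInt(keyWidth.W))               // io.enq.bits.key
    val insertIdx  = Output(UInt(log2Ceil(queSize).W))
  })
  val isLargeOrEqual = VecInit(io.storedKeys.map(_ >= io.enqKey))
  // PriorityEncoder returns the lowest set bit, i.e. the first stage with key >= enqKey.
  io.insertIdx := PriorityEncoder(isLargeOrEqual.asUInt)
}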
module PriorityQueueStage_195( // @[ShiftRegisterPriorityQueue.scala:21:7] input clock, // @[ShiftRegisterPriorityQueue.scala:21:7] input reset, // @[ShiftRegisterPriorityQueue.scala:21:7] output [30:0] io_output_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_output_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] output [30:0] io_output_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_output_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_input_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_input_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_input_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_input_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_cmd_valid, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_cmd_bits, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_insert_here, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_cur_input_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_cur_input_keyval_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] output [30:0] io_cur_output_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_cur_output_keyval_value_symbol // @[ShiftRegisterPriorityQueue.scala:22:14] ); wire [30:0] io_input_prev_key_0 = io_input_prev_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_input_prev_value_symbol_0 = io_input_prev_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_input_nxt_key_0 = io_input_nxt_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_input_nxt_value_symbol_0 = io_input_nxt_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_cmd_valid_0 = io_cmd_valid; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_cmd_bits_0 = io_cmd_bits; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_insert_here_0 = io_insert_here; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_cur_input_keyval_key_0 = io_cur_input_keyval_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_cur_input_keyval_value_symbol_0 = io_cur_input_keyval_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] reg [30:0] key_reg; // @[ShiftRegisterPriorityQueue.scala:30:24] assign io_output_prev_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] assign io_output_nxt_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] assign io_cur_output_keyval_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] reg [9:0] value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:31:22] assign io_output_prev_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] assign io_output_nxt_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] assign io_cur_output_keyval_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] wire _T_2 = 
key_reg >= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24, :52:30] always @(posedge clock) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (reset) // @[ShiftRegisterPriorityQueue.scala:21:7] key_reg <= 31'h7FFFFFFF; // @[ShiftRegisterPriorityQueue.scala:30:24] else if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7] key_reg <= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30] key_reg <= io_input_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] end else // @[ShiftRegisterPriorityQueue.scala:21:7] key_reg <= io_input_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] end if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7] value_reg_symbol <= io_cur_input_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30] value_reg_symbol <= io_input_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] end else // @[ShiftRegisterPriorityQueue.scala:21:7] value_reg_symbol <= io_input_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] end end // always @(posedge) assign io_output_prev_key = io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_output_prev_value_symbol = io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_output_nxt_key = io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_output_nxt_value_symbol = io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_cur_output_keyval_key = io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_cur_output_keyval_value_symbol = io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] endmodule
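A quick, plain-Scala sanity check (illustrative only, not generated code) connecting the parameters to the Verilog above: PriorityQueueStage_195 has 31-bit keys and resets key_reg to 31'h7FFFFFFF, which is exactly what MAX_VALUE evaluates to when keyWidth = 30 and the internal key width is keyWidth + 1 = 31.

object MaxValueCheck extends App {
  val keyWidth = 30
  val keyWidthInternal = keyWidth + 1
  val maxValue = (BigInt(1) << keyWidthInternal) - 1
  // Prints MAX_VALUE = 0x7FFFFFFF, matching the reset value of key_reg above.
  println("MAX_VALUE = 0x" + maxValue.toString(16).toUpperCase)
}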
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed version of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
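// Editor's note (illustrative, hedged): the when/elsewhen chain below fixes the expected A->D opcode
// pairing that the formal assumptions further down rely on. A Get, ArithmeticData or LogicalData request
// must be answered with AccessAckData, a PutFullData or PutPartialData with AccessAck, and any other
// opcode tracked by this helper falls through to HintAck. For example, if sym_source happened to pick
// source 3 and that source issued a Get, then on the matching first D beat the checker assumes
// d_opcode === AccessAckData and d_size equal to the recorded a_size.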
val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message " + "is already pending (not received until current cycle) for a prior request message " + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the " + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the " + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request " + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
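// Editor's note (illustrative, hedged): inflight_opcodes and inflight_sizes are flat bit-vectors holding one
// (opcode << 1) | 1 or (size << 1) | 1 entry per source ID; the trailing 1 marks the slot as occupied and is
// stripped again by the ">> 1" in a_opcode_lookup / a_size_lookup. Worked example with hypothetical values,
// a_opcode_bus_size = 4 so source s owns the 4-bit slot starting at bit (s << 2): a Get (opcode 4) from
// source 2 writes (4 << 1) | 1 = 9 into bits [11:8], i.e. a_opcodes_set = 0x900, and on the matching D beat
// ((inflight_opcodes >> 8) & 0xF) >> 1 recovers opcode 4, which responseMap maps to AccessAckData.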
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s from high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalone diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } }
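// Editor's sketch (hypothetical, not part of the upstream file): the UInt overload of IdRange.contains above
// splits the test into a shared-prefix equality check plus a bounded check on the low bits. For IdRange(4, 8):
// start ^ (end-1) = 4 ^ 7 = 3, so the largest differing bit is bit 1, the prefix x >> 2 must equal 1, and the
// two low bits may range from (4 & 3) = 0 to (7 & 3) = 3, i.e. exactly x in 4..7. The object below exercises
// the plain-Int overload on the same range and can be deleted freely; nothing else refers to it.
private object IdRangeContainsSketch {
  private val r = IdRange(4, 8)
  val allInside: Boolean = (4 until 8).forall(r.contains(_))  // true: 4, 5, 6 and 7 are members
  val noneOutside: Boolean = !Seq(3, 8).exists(r.contains(_)) // true: the bounds' neighbours are excluded
}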
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
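// Editor's note (illustrative, hedged): on channel A the data-carrying opcodes are PutFullData, PutPartialData,
// ArithmeticData and LogicalData, while Get, Intent (hints) and the Acquire opcodes carry no payload.
// staticHasData only inspects the port-level support flags, so when a port can legally carry just one of the
// two groups, hasData further below collapses to an elaboration-time constant instead of decoding the opcode.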
        val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
        // Do there exist A messages without Data?
        val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
      }
      case _:TLBundleB => {
        // Do there exist B messages with Data?
        val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
        // Do there exist B messages without Data?
        val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
      }
      case _:TLBundleC => {
        // Do there exist C messages with Data?
        val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
        // Do there exist C messages without Data?
        val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
        if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
      }
      case _:TLBundleD => {
        // Do there exist D messages with Data?
        val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
        // Do there exist D messages without Data?
        val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
        if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
      }
      case _:TLBundleE => Some(false)
    }
  }

  def isRequest(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => true.B
      case b: TLBundleB => true.B
      case c: TLBundleC => c.opcode(2) && c.opcode(1)
                           // opcode === TLMessages.Release ||
                           // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(2) && !d.opcode(1)
                           // opcode === TLMessages.Grant ||
                           // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
  }

  def isResponse(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => false.B
      case b: TLBundleB => false.B
      case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
                           // opcode =/= TLMessages.Release &&
                           // opcode =/= TLMessages.ReleaseData
      case d: TLBundleD => true.B // Grant isResponse + isRequest
      case e: TLBundleE => true.B
    }
  }

  def hasData(x: TLChannel): Bool = {
    val opdata = x match {
      case a: TLBundleA => !a.opcode(2)
                           // opcode === TLMessages.PutFullData ||
                           // opcode === TLMessages.PutPartialData ||
                           // opcode === TLMessages.ArithmeticData ||
                           // opcode === TLMessages.LogicalData
      case b: TLBundleB => !b.opcode(2)
                           // opcode === TLMessages.PutFullData ||
                           // opcode === TLMessages.PutPartialData ||
                           // opcode === TLMessages.ArithmeticData ||
                           // opcode === TLMessages.LogicalData
      case c: TLBundleC => c.opcode(0)
                           // opcode === TLMessages.AccessAckData ||
                           // opcode === TLMessages.ProbeAckData ||
                           // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(0)
                           // opcode === TLMessages.AccessAckData ||
                           // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
    staticHasData(x).map(_.B).getOrElse(opdata)
  }

  def opcode(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.opcode
      case b: TLBundleB => b.opcode
      case c: TLBundleC => c.opcode
      case d: TLBundleD => d.opcode
    }
  }

  def param(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.param
      case b: TLBundleB => b.param
      case c: TLBundleC => c.param
      case d: TLBundleD =>
        d.param
    }
  }

  def size(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.size
      case b: TLBundleB => b.size
      case c: TLBundleC => c.size
      case d: TLBundleD => d.size
    }
  }

  def data(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.data
      case b: TLBundleB => b.data
      case c: TLBundleC => c.data
      case d: TLBundleD => d.data
    }
  }

  def corrupt(x: TLDataChannel): Bool = {
    x match {
      case a: TLBundleA => a.corrupt
      case b: TLBundleB => b.corrupt
      case c: TLBundleC => c.corrupt
      case d: TLBundleD => d.corrupt
    }
  }

  def mask(x: TLAddrChannel): UInt = {
    x match {
      case a: TLBundleA => a.mask
      case b: TLBundleB => b.mask
      case c: TLBundleC => mask(c.address, c.size)
    }
  }

  def full_mask(x: TLAddrChannel): UInt = {
    x match {
      case a: TLBundleA => mask(a.address, a.size)
      case b: TLBundleB => mask(b.address, b.size)
      case c: TLBundleC => mask(c.address, c.size)
    }
  }

  def address(x: TLAddrChannel): UInt = {
    x match {
      case a: TLBundleA => a.address
      case b: TLBundleB => b.address
      case c: TLBundleC => c.address
    }
  }

  def source(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.source
      case b: TLBundleB => b.source
      case c: TLBundleC => c.source
      case d: TLBundleD => d.source
    }
  }

  def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
  def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)

  def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
  def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))

  def numBeats(x: TLChannel): UInt = {
    x match {
      case _: TLBundleE => 1.U
      case bundle: TLDataChannel => {
        val hasData = this.hasData(bundle)
        val size = this.size(bundle)
        val cutoff = log2Ceil(manager.beatBytes)
        val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
        val decode = UIntToOH(size, maxLgSize+1) >> cutoff
        Mux(hasData, decode | small.asUInt, 1.U)
      }
    }
  }

  def numBeats1(x: TLChannel): UInt = {
    x match {
      case _: TLBundleE => 0.U
      case bundle: TLDataChannel => {
        if (maxLgSize == 0) {
          0.U
        } else {
          val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
          Mux(hasData(bundle), decode, 0.U)
        }
      }
    }
  }

  def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
    val beats1 = numBeats1(bits)
    val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
    val counter1 = counter - 1.U
    val first = counter === 0.U
    val last = counter === 1.U || beats1 === 0.U
    val done = last && fire
    val count = (beats1 & ~counter1)
    when (fire) {
      counter := Mux(first, beats1, counter1)
    }
    (first, last, done, count)
  }

  def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
  def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
  def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)

  def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
  def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
  def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)

  def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
  def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
  def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)

  def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
    val r = firstlastHelper(bits, fire)
    (r._1, r._2, r._3)
  }
  def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
  def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)

  def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
    val r = firstlastHelper(bits, fire)
    (r._1, r._2, r._3, r._4)
  }
  def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
  def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)

  def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
    val r = firstlastHelper(bits, fire)
    (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
  }
  def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
  def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)

  // Does the request need T permissions to be executed?
  def needT(a: TLBundleA): Bool = {
    val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
      TLPermissions.NtoB -> false.B,
      TLPermissions.NtoT -> true.B,
      TLPermissions.BtoT -> true.B))
    MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
      TLMessages.PutFullData -> true.B,
      TLMessages.PutPartialData -> true.B,
      TLMessages.ArithmeticData -> true.B,
      TLMessages.LogicalData -> true.B,
      TLMessages.Get -> false.B,
      TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
        TLHints.PREFETCH_READ -> false.B,
        TLHints.PREFETCH_WRITE -> true.B)),
      TLMessages.AcquireBlock -> acq_needT,
      TLMessages.AcquirePerm -> acq_needT))
  }

  // This is a very expensive circuit; use only if you really mean it!
  def inFlight(x: TLBundle): (UInt, UInt) = {
    val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
    val bce = manager.anySupportAcquireB && client.anySupportProbe

    val (a_first, a_last, _) = firstlast(x.a)
    val (b_first, b_last, _) = firstlast(x.b)
    val (c_first, c_last, _) = firstlast(x.c)
    val (d_first, d_last, _) = firstlast(x.d)
    val (e_first, e_last, _) = firstlast(x.e)

    val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
    val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
    val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
    val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
    val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))

    val a_inc = x.a.fire && a_first && a_request
    val b_inc = x.b.fire && b_first && b_request
    val c_inc = x.c.fire && c_first && c_request
    val d_inc = x.d.fire && d_first && d_request
    val e_inc = x.e.fire && e_first && e_request
    val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))

    val a_dec = x.a.fire && a_last && a_response
    val b_dec = x.b.fire && b_last && b_response
    val c_dec = x.c.fire && c_last && c_response
    val d_dec = x.d.fire && d_last && d_response
    val e_dec = x.e.fire && e_last && e_response
    val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))

    val next_flight = flight + PopCount(inc) - PopCount(dec)
    flight := next_flight

    (flight, next_flight)
  }

  def prettySourceMapping(context: String): String = {
    s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
  }
}

class TLEdgeOut(
  client: TLClientPortParameters,
  manager: TLManagerPortParameters,
  params: Parameters,
  sourceInfo: SourceInfo)
  extends TLEdge(client, manager, params, sourceInfo) {

  // Transfers
  def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
    require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsAcquireBFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.AcquireBlock
    a.param := growPermissions
    a.size :=
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
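
The AddressSet algebra shown earlier is plain BigInt arithmetic, so it can be exercised without elaborating any Chisel hardware. The following sketch is illustrative only; it is not part of the files above, and the import path is an assumption (adjust it to wherever AddressSet lives in your tree):

// Illustrative only: exercises the BigInt-level AddressSet operations shown above.
// The package path below is an assumption, not taken from this document.
import freechips.rocketchip.diplomacy.AddressSet

object AddressSetSketch extends App {
  val ram  = AddressSet(0x80000000L, 0x0fffffffL) // 256 MiB window at 0x8000_0000
  val mmio = AddressSet(0x00000000L, 0x7fffffffL) // everything below it

  assert(ram.contains(BigInt(0x80001000L))) // (x ^ base) & ~mask == 0
  assert(!ram.overlaps(mmio))               // disjoint: a care-bit of the bases differs

  // widen() ignores the given bits, folding two 4 KiB pages into one set
  assert(AddressSet(0x1000, 0xfff).widen(0x1000) == AddressSet(0x0, 0x1fff))

  // misaligned() decomposes an arbitrary base/size region into power-of-two sets
  println(AddressSet.misaligned(0x3000, 0x5000))
  // expected: List(AddressSet(0x3000, 0xfff), AddressSet(0x4000, 0x3fff))
}

AddressSet.unify coalesces sets that differ only in a single address bit, and sorts its result using the containment ordering defined by compare (smallest base first, largest mask first).
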
module TLMonitor_35( // @[Monitor.scala:36:7]
  input clock, // @[Monitor.scala:36:7]
  input reset, // @[Monitor.scala:36:7]
  input io_in_a_ready, // @[Monitor.scala:20:14]
  input io_in_a_valid, // @[Monitor.scala:20:14]
  input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
  input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
  input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
  input [5:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
  input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
  input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
  input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
  input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
  input io_in_b_ready, // @[Monitor.scala:20:14]
  input io_in_b_valid, // @[Monitor.scala:20:14]
  input [1:0] io_in_b_bits_param, // @[Monitor.scala:20:14]
  input [5:0] io_in_b_bits_source, // @[Monitor.scala:20:14]
  input [31:0] io_in_b_bits_address, // @[Monitor.scala:20:14]
  input io_in_c_ready, // @[Monitor.scala:20:14]
  input io_in_c_valid, // @[Monitor.scala:20:14]
  input [2:0] io_in_c_bits_opcode, // @[Monitor.scala:20:14]
  input [2:0] io_in_c_bits_param, // @[Monitor.scala:20:14]
  input [2:0] io_in_c_bits_size, // @[Monitor.scala:20:14]
  input [5:0] io_in_c_bits_source, // @[Monitor.scala:20:14]
  input [31:0] io_in_c_bits_address, // @[Monitor.scala:20:14]
  input [63:0] io_in_c_bits_data, // @[Monitor.scala:20:14]
  input io_in_c_bits_corrupt, // @[Monitor.scala:20:14]
  input io_in_d_ready, // @[Monitor.scala:20:14]
  input io_in_d_valid, // @[Monitor.scala:20:14]
  input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
  input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
  input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
  input [5:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
  input [2:0] io_in_d_bits_sink, // @[Monitor.scala:20:14]
  input io_in_d_bits_denied, // @[Monitor.scala:20:14]
  input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
  input io_in_d_bits_corrupt, // @[Monitor.scala:20:14]
  input io_in_e_valid, // @[Monitor.scala:20:14]
  input [2:0] io_in_e_bits_sink // @[Monitor.scala:20:14]
);

  wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
  wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
  wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
  wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
  wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
  wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
  wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
  wire [5:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
  wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
  wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
  wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
  wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
  wire io_in_b_ready_0 = io_in_b_ready; // @[Monitor.scala:36:7]
  wire io_in_b_valid_0 = io_in_b_valid; // @[Monitor.scala:36:7]
  wire [1:0] io_in_b_bits_param_0 = io_in_b_bits_param; // @[Monitor.scala:36:7]
  wire [5:0] io_in_b_bits_source_0 = io_in_b_bits_source; // @[Monitor.scala:36:7]
  wire [31:0] io_in_b_bits_address_0 = io_in_b_bits_address; // @[Monitor.scala:36:7]
  wire io_in_c_ready_0 = io_in_c_ready; // @[Monitor.scala:36:7]
  wire io_in_c_valid_0 = io_in_c_valid; // @[Monitor.scala:36:7]
  wire [2:0] io_in_c_bits_opcode_0 = io_in_c_bits_opcode; //
@[Monitor.scala:36:7] wire [2:0] io_in_c_bits_param_0 = io_in_c_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_c_bits_size_0 = io_in_c_bits_size; // @[Monitor.scala:36:7] wire [5:0] io_in_c_bits_source_0 = io_in_c_bits_source; // @[Monitor.scala:36:7] wire [31:0] io_in_c_bits_address_0 = io_in_c_bits_address; // @[Monitor.scala:36:7] wire [63:0] io_in_c_bits_data_0 = io_in_c_bits_data; // @[Monitor.scala:36:7] wire io_in_c_bits_corrupt_0 = io_in_c_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [5:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_e_valid_0 = io_in_e_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_e_bits_sink_0 = io_in_e_bits_sink; // @[Monitor.scala:36:7] wire io_in_e_ready = 1'h1; // @[Monitor.scala:36:7] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_39 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_41 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_45 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_47 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_53 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_59 = 1'h1; // @[Parameters.scala:57:20] wire mask_sub_sub_sub_0_1_1 = 1'h1; // @[Misc.scala:206:21] wire mask_sub_sub_size_1 = 1'h1; // @[Misc.scala:209:26] wire mask_sub_sub_0_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_sub_1_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_0_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_1_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_2_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_3_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_size_1 = 1'h1; // @[Misc.scala:209:26] wire mask_acc_8 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_9 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_10 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_11 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_12 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_13 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_14 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_15 = 1'h1; // @[Misc.scala:215:29] wire _legal_source_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _legal_source_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _legal_source_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _legal_source_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _legal_source_T_15 = 1'h1; // @[Parameters.scala:56:32] wire 
_legal_source_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _legal_source_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _legal_source_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_75 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_77 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_81 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_83 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_87 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_89 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_93 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_95 = 1'h1; // @[Parameters.scala:57:20] wire _b_first_beats1_opdata_T = 1'h1; // @[Edges.scala:97:37] wire _b_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire b_first_last = 1'h1; // @[Edges.scala:232:33] wire [2:0] io_in_b_bits_opcode = 3'h6; // @[Monitor.scala:36:7] wire [2:0] io_in_b_bits_size = 3'h6; // @[Monitor.scala:36:7] wire [2:0] _mask_sizeOH_T_3 = 3'h6; // @[Misc.scala:202:34] wire [7:0] io_in_b_bits_mask = 8'hFF; // @[Monitor.scala:36:7] wire [7:0] mask_1 = 8'hFF; // @[Misc.scala:222:10] wire [63:0] io_in_b_bits_data = 64'h0; // @[Monitor.scala:36:7] wire io_in_b_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire mask_sub_size_1 = 1'h0; // @[Misc.scala:209:26] wire _mask_sub_acc_T_4 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_5 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_6 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_7 = 1'h0; // @[Misc.scala:215:38] wire _legal_source_T_30 = 1'h0; // @[Mux.scala:30:73] wire b_first_beats1_opdata = 1'h0; // @[Edges.scala:97:28] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [3:0] _mask_sizeOH_T_4 = 4'h4; // @[OneHot.scala:65:12] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] 
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [2:0] _mask_sizeOH_T_5 = 3'h4; // @[OneHot.scala:65:27] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] mask_sizeOH_1 = 3'h5; // @[Misc.scala:202:81] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] b_first_beats1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] b_first_count = 3'h0; // @[Edges.scala:234:25] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] b_first_beats1_decode = 3'h7; // @[Edges.scala:220:59] wire [5:0] is_aligned_mask_1 = 6'h3F; // @[package.scala:243:46] wire [5:0] _b_first_beats1_decode_T_2 = 6'h3F; // @[package.scala:243:46] wire [5:0] _is_aligned_mask_T_3 = 6'h0; // @[package.scala:243:76] wire [5:0] _b_first_beats1_decode_T_1 = 6'h0; // @[package.scala:243:76] wire [12:0] _is_aligned_mask_T_2 = 13'hFC0; // @[package.scala:243:71] wire [12:0] _b_first_beats1_decode_T = 13'hFC0; // @[package.scala:243:71] wire [3:0] mask_lo_1 = 4'hF; // @[Misc.scala:222:10] wire [3:0] mask_hi_1 = 4'hF; // @[Misc.scala:222:10] wire [1:0] mask_lo_lo_1 = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_lo_hi_1 = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo_1 = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_hi_hi_1 = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_sizeOH_shiftAmount_1 = 2'h2; // @[OneHot.scala:64:49] wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [5:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_8 = io_in_a_bits_source_0; 
// @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_44 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_45 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_46 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_47 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _legal_source_uncommonBits_T = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _legal_source_uncommonBits_T_1 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _legal_source_uncommonBits_T_2 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _legal_source_uncommonBits_T_3 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_48 = 
io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_49 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_50 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_51 = io_in_b_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_8 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_9 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_10 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_11 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_52 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_53 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_54 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_55 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_56 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_57 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_58 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_59 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_60 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_61 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_62 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_63 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_64 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_65 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_66 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_67 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_68 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_69 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_70 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _uncommonBits_T_71 = io_in_c_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [5:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_T = io_in_a_bits_source_0 == 6'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_1 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_7 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_13 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_19 = io_in_a_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire _source_ok_T_2 = _source_ok_T_1 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // 
@[Parameters.scala:52:{29,56}] wire _source_ok_T_8 = _source_ok_T_7 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_14 = _source_ok_T_13 == 4'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_20 = _source_ok_T_19 == 4'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire _source_ok_T_25 = io_in_a_bits_source_0 == 6'h24; // @[Monitor.scala:36:7] wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31] wire _source_ok_T_26 = io_in_a_bits_source_0 == 6'h26; // @[Monitor.scala:36:7] wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31] wire _source_ok_T_27 = io_in_a_bits_source_0 == 6'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_7 = _source_ok_T_27; // @[Parameters.scala:1138:31] wire _source_ok_T_28 = io_in_a_bits_source_0 == 6'h22; // @[Monitor.scala:36:7] wire _source_ok_WIRE_8 = _source_ok_T_28; // @[Parameters.scala:1138:31] wire _source_ok_T_29 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_30 = _source_ok_T_29 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_31 = _source_ok_T_30 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_32 = _source_ok_T_31 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_33 = _source_ok_T_32 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_34 = _source_ok_T_33 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_35 = _source_ok_T_34 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_35 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46] wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [31:0] _is_aligned_T = {26'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // 
@[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // 
@[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] 
uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_36 = io_in_d_bits_source_0 == 6'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_36; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_37 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_43 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_49 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_55 = io_in_d_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire _source_ok_T_38 = _source_ok_T_37 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_40 = _source_ok_T_38; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_42 = _source_ok_T_40; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_42; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_44 = _source_ok_T_43 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_46 = _source_ok_T_44; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_48 = _source_ok_T_46; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_48; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_50 = _source_ok_T_49 == 4'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_54 = _source_ok_T_52; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_54; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // 
@[Parameters.scala:52:{29,56}] wire _source_ok_T_56 = _source_ok_T_55 == 4'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_60 = _source_ok_T_58; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_4 = _source_ok_T_60; // @[Parameters.scala:1138:31] wire _source_ok_T_61 = io_in_d_bits_source_0 == 6'h24; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_5 = _source_ok_T_61; // @[Parameters.scala:1138:31] wire _source_ok_T_62 = io_in_d_bits_source_0 == 6'h26; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_6 = _source_ok_T_62; // @[Parameters.scala:1138:31] wire _source_ok_T_63 = io_in_d_bits_source_0 == 6'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_7 = _source_ok_T_63; // @[Parameters.scala:1138:31] wire _source_ok_T_64 = io_in_d_bits_source_0 == 6'h22; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_8 = _source_ok_T_64; // @[Parameters.scala:1138:31] wire _source_ok_T_65 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_66 = _source_ok_T_65 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_67 = _source_ok_T_66 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_68 = _source_ok_T_67 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_69 = _source_ok_T_68 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_70 = _source_ok_T_69 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_71 = _source_ok_T_70 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_71 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46] wire sink_ok = io_in_d_bits_sink_0 != 3'h7; // @[Monitor.scala:36:7, :309:31] wire _legal_source_T = io_in_b_bits_source_0 == 6'h10; // @[Monitor.scala:36:7] wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _legal_source_T_1 = io_in_b_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _legal_source_T_7 = io_in_b_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _legal_source_T_13 = io_in_b_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _legal_source_T_19 = io_in_b_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}] wire _legal_source_T_25 = io_in_b_bits_source_0 == 6'h24; // @[Monitor.scala:36:7] wire _legal_source_T_26 = io_in_b_bits_source_0 == 6'h26; // @[Monitor.scala:36:7] wire _legal_source_T_27 = io_in_b_bits_source_0 == 6'h20; // @[Monitor.scala:36:7] wire _legal_source_T_28 = io_in_b_bits_source_0 == 6'h22; // @[Monitor.scala:36:7] wire [27:0] _GEN_0 = io_in_b_bits_address_0[27:0] ^ 28'h8000000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T = {io_in_b_bits_address_0[31:28], _GEN_0}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_1 = {1'h0, _address_ok_T}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_2 = _address_ok_T_1 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_3 = _address_ok_T_2; // @[Parameters.scala:137:46] wire _address_ok_T_4 = _address_ok_T_3 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_0 = _address_ok_T_4; // 
@[Parameters.scala:612:40] wire [31:0] _address_ok_T_5 = io_in_b_bits_address_0 ^ 32'h80000000; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_6 = {1'h0, _address_ok_T_5}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_7 = _address_ok_T_6 & 33'h1F0000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_8 = _address_ok_T_7; // @[Parameters.scala:137:46] wire _address_ok_T_9 = _address_ok_T_8 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1 = _address_ok_T_9; // @[Parameters.scala:612:40] wire address_ok = _address_ok_WIRE_0 | _address_ok_WIRE_1; // @[Parameters.scala:612:40, :636:64] wire [31:0] _is_aligned_T_1 = {26'h0, io_in_b_bits_address_0[5:0]}; // @[Monitor.scala:36:7] wire is_aligned_1 = _is_aligned_T_1 == 32'h0; // @[Edges.scala:21:{16,24}] wire mask_sub_sub_bit_1 = io_in_b_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2_1 = mask_sub_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit_1 = ~mask_sub_sub_bit_1; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2_1 = mask_sub_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T_2 = mask_sub_sub_0_2_1; // @[Misc.scala:214:27, :215:38] wire _mask_sub_sub_acc_T_3 = mask_sub_sub_1_2_1; // @[Misc.scala:214:27, :215:38] wire mask_sub_bit_1 = io_in_b_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit_1 = ~mask_sub_bit_1; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2_1 = mask_sub_sub_0_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire mask_sub_1_2_1 = mask_sub_sub_0_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire mask_sub_2_2_1 = mask_sub_sub_1_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27] wire mask_sub_3_2_1 = mask_sub_sub_1_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27] wire mask_bit_1 = io_in_b_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit_1 = ~mask_bit_1; // @[Misc.scala:210:26, :211:20] wire mask_eq_8 = mask_sub_0_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_8 = mask_eq_8; // @[Misc.scala:214:27, :215:38] wire mask_eq_9 = mask_sub_0_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_9 = mask_eq_9; // @[Misc.scala:214:27, :215:38] wire mask_eq_10 = mask_sub_1_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_10 = mask_eq_10; // @[Misc.scala:214:27, :215:38] wire mask_eq_11 = mask_sub_1_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_11 = mask_eq_11; // @[Misc.scala:214:27, :215:38] wire mask_eq_12 = mask_sub_2_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_12 = mask_eq_12; // @[Misc.scala:214:27, :215:38] wire mask_eq_13 = mask_sub_2_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_13 = mask_eq_13; // @[Misc.scala:214:27, :215:38] wire mask_eq_14 = mask_sub_3_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_14 = mask_eq_14; // @[Misc.scala:214:27, :215:38] wire mask_eq_15 = mask_sub_3_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_15 = mask_eq_15; // @[Misc.scala:214:27, :215:38] wire _legal_source_WIRE_0 = _legal_source_T; // @[Parameters.scala:1138:31] wire [1:0] legal_source_uncommonBits = _legal_source_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire _legal_source_T_2 = _legal_source_T_1 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _legal_source_T_4 = _legal_source_T_2; // @[Parameters.scala:54:{32,67}] wire _legal_source_T_6 = _legal_source_T_4; // @[Parameters.scala:54:67, :56:48] wire 
_legal_source_WIRE_1 = _legal_source_T_6; // @[Parameters.scala:1138:31] wire [1:0] legal_source_uncommonBits_1 = _legal_source_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _legal_source_T_8 = _legal_source_T_7 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _legal_source_T_10 = _legal_source_T_8; // @[Parameters.scala:54:{32,67}] wire _legal_source_T_12 = _legal_source_T_10; // @[Parameters.scala:54:67, :56:48] wire _legal_source_WIRE_2 = _legal_source_T_12; // @[Parameters.scala:1138:31] wire [1:0] legal_source_uncommonBits_2 = _legal_source_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _legal_source_T_14 = _legal_source_T_13 == 4'h2; // @[Parameters.scala:54:{10,32}] wire _legal_source_T_16 = _legal_source_T_14; // @[Parameters.scala:54:{32,67}] wire _legal_source_T_18 = _legal_source_T_16; // @[Parameters.scala:54:67, :56:48] wire _legal_source_WIRE_3 = _legal_source_T_18; // @[Parameters.scala:1138:31] wire [1:0] legal_source_uncommonBits_3 = _legal_source_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _legal_source_T_20 = _legal_source_T_19 == 4'h3; // @[Parameters.scala:54:{10,32}] wire _legal_source_T_22 = _legal_source_T_20; // @[Parameters.scala:54:{32,67}] wire _legal_source_T_24 = _legal_source_T_22; // @[Parameters.scala:54:67, :56:48] wire _legal_source_WIRE_4 = _legal_source_T_24; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_5 = _legal_source_T_25; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_6 = _legal_source_T_26; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_7 = _legal_source_T_27; // @[Parameters.scala:1138:31] wire _legal_source_WIRE_8 = _legal_source_T_28; // @[Parameters.scala:1138:31] wire [4:0] _legal_source_T_29 = {_legal_source_WIRE_0, 4'h0}; // @[Mux.scala:30:73] wire [4:0] _legal_source_T_38 = _legal_source_T_29; // @[Mux.scala:30:73] wire [2:0] _legal_source_T_31 = {_legal_source_WIRE_2, 2'h0}; // @[Mux.scala:30:73] wire [3:0] _legal_source_T_32 = {_legal_source_WIRE_3, 3'h0}; // @[Mux.scala:30:73] wire [3:0] _legal_source_T_33 = _legal_source_WIRE_4 ? 4'hC : 4'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_34 = _legal_source_WIRE_5 ? 6'h24 : 6'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_35 = _legal_source_WIRE_6 ? 6'h26 : 6'h0; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_36 = {_legal_source_WIRE_7, 5'h0}; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_37 = _legal_source_WIRE_8 ? 
6'h22 : 6'h0; // @[Mux.scala:30:73] wire [4:0] _legal_source_T_39 = {_legal_source_T_38[4:3], _legal_source_T_38[2:0] | _legal_source_T_31}; // @[Mux.scala:30:73] wire [4:0] _legal_source_T_40 = {_legal_source_T_39[4], _legal_source_T_39[3:0] | _legal_source_T_32}; // @[Mux.scala:30:73] wire [4:0] _legal_source_T_41 = {_legal_source_T_40[4], _legal_source_T_40[3:0] | _legal_source_T_33}; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_42 = {1'h0, _legal_source_T_41} | _legal_source_T_34; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_43 = _legal_source_T_42 | _legal_source_T_35; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_44 = _legal_source_T_43 | _legal_source_T_36; // @[Mux.scala:30:73] wire [5:0] _legal_source_T_45 = _legal_source_T_44 | _legal_source_T_37; // @[Mux.scala:30:73] wire [5:0] _legal_source_WIRE_1_0 = _legal_source_T_45; // @[Mux.scala:30:73] wire legal_source = _legal_source_WIRE_1_0 == io_in_b_bits_source_0; // @[Mux.scala:30:73] wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_49 = _uncommonBits_T_49[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_72 = io_in_c_bits_source_0 == 6'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_0 = _source_ok_T_72; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_73 = io_in_c_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_79 = io_in_c_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_85 = io_in_c_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_91 = io_in_c_bits_source_0[5:2]; // @[Monitor.scala:36:7] wire _source_ok_T_74 = _source_ok_T_73 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_76 = _source_ok_T_74; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_78 = _source_ok_T_76; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2_1 = _source_ok_T_78; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_80 = _source_ok_T_79 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_82 = _source_ok_T_80; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_84 = _source_ok_T_82; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2_2 = _source_ok_T_84; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_86 = _source_ok_T_85 == 4'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_88 = _source_ok_T_86; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_90 = _source_ok_T_88; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2_3 = _source_ok_T_90; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_92 = _source_ok_T_91 == 4'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_94 = _source_ok_T_92; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_96 = _source_ok_T_94; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2_4 = _source_ok_T_96; // @[Parameters.scala:1138:31] wire _source_ok_T_97 = io_in_c_bits_source_0 == 6'h24; // @[Monitor.scala:36:7] wire 
_source_ok_WIRE_2_5 = _source_ok_T_97; // @[Parameters.scala:1138:31] wire _source_ok_T_98 = io_in_c_bits_source_0 == 6'h26; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_6 = _source_ok_T_98; // @[Parameters.scala:1138:31] wire _source_ok_T_99 = io_in_c_bits_source_0 == 6'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_7 = _source_ok_T_99; // @[Parameters.scala:1138:31] wire _source_ok_T_100 = io_in_c_bits_source_0 == 6'h22; // @[Monitor.scala:36:7] wire _source_ok_WIRE_2_8 = _source_ok_T_100; // @[Parameters.scala:1138:31] wire _source_ok_T_101 = _source_ok_WIRE_2_0 | _source_ok_WIRE_2_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_102 = _source_ok_T_101 | _source_ok_WIRE_2_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_103 = _source_ok_T_102 | _source_ok_WIRE_2_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_104 = _source_ok_T_103 | _source_ok_WIRE_2_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_105 = _source_ok_T_104 | _source_ok_WIRE_2_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_106 = _source_ok_T_105 | _source_ok_WIRE_2_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_107 = _source_ok_T_106 | _source_ok_WIRE_2_7; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_2 = _source_ok_T_107 | _source_ok_WIRE_2_8; // @[Parameters.scala:1138:31, :1139:46] wire [12:0] _GEN_1 = 13'h3F << io_in_c_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T_4; // @[package.scala:243:71] assign _is_aligned_mask_T_4 = _GEN_1; // @[package.scala:243:71] wire [12:0] _c_first_beats1_decode_T; // @[package.scala:243:71] assign _c_first_beats1_decode_T = _GEN_1; // @[package.scala:243:71] wire [12:0] _c_first_beats1_decode_T_3; // @[package.scala:243:71] assign _c_first_beats1_decode_T_3 = _GEN_1; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_5 = _is_aligned_mask_T_4[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask_2 = ~_is_aligned_mask_T_5; // @[package.scala:243:{46,76}] wire [31:0] _is_aligned_T_2 = {26'h0, io_in_c_bits_address_0[5:0] & is_aligned_mask_2}; // @[package.scala:243:46] wire is_aligned_2 = _is_aligned_T_2 == 32'h0; // @[Edges.scala:21:{16,24}] wire [27:0] _GEN_2 = io_in_c_bits_address_0[27:0] ^ 28'h8000000; // @[Monitor.scala:36:7] wire [31:0] _address_ok_T_10 = {io_in_c_bits_address_0[31:28], _GEN_2}; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_11 = {1'h0, _address_ok_T_10}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_12 = _address_ok_T_11 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_13 = _address_ok_T_12; // @[Parameters.scala:137:46] wire _address_ok_T_14 = _address_ok_T_13 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_0 = _address_ok_T_14; // @[Parameters.scala:612:40] wire [31:0] _address_ok_T_15 = io_in_c_bits_address_0 ^ 32'h80000000; // @[Monitor.scala:36:7] wire [32:0] _address_ok_T_16 = {1'h0, _address_ok_T_15}; // @[Parameters.scala:137:{31,41}] wire [32:0] _address_ok_T_17 = _address_ok_T_16 & 33'h1F0000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _address_ok_T_18 = _address_ok_T_17; // @[Parameters.scala:137:46] wire _address_ok_T_19 = _address_ok_T_18 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _address_ok_WIRE_1_1 = _address_ok_T_19; // @[Parameters.scala:612:40] wire address_ok_1 = _address_ok_WIRE_1_0 | _address_ok_WIRE_1_1; // @[Parameters.scala:612:40, :636:64] wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // 
@[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_54 = _uncommonBits_T_54[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_55 = _uncommonBits_T_55[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_56 = _uncommonBits_T_56[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_57 = _uncommonBits_T_57[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_58 = _uncommonBits_T_58[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_59 = _uncommonBits_T_59[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_60 = _uncommonBits_T_60[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_61 = _uncommonBits_T_61[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_62 = _uncommonBits_T_62[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_63 = _uncommonBits_T_63[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_64 = _uncommonBits_T_64[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_65 = _uncommonBits_T_65[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_66 = _uncommonBits_T_66[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_67 = _uncommonBits_T_67[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_68 = _uncommonBits_T_68[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_69 = _uncommonBits_T_69[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_70 = _uncommonBits_T_70[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_71 = _uncommonBits_T_71[1:0]; // @[Parameters.scala:52:{29,56}] wire sink_ok_1 = io_in_e_bits_sink_0 != 3'h7; // @[Monitor.scala:36:7, :367:31] wire _T_2142 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_2142; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_2142; // @[Decoupled.scala:51:35] wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1 = a_first_beats1_opdata ? 
a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [5:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] wire _T_2216 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_2216; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_2216; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_2216; // @[Decoupled.scala:51:35] wire _d_first_T_3; // @[Decoupled.scala:51:35] assign _d_first_T_3 = _T_2216; // @[Decoupled.scala:51:35] wire [12:0] _GEN_3 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_3; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_3; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_3; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_9; // @[package.scala:243:71] assign _d_first_beats1_decode_T_9 = _GEN_3; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_3 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [2:0] d_first_beats1 = d_first_beats1_opdata ? 
d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [5:0] source_1; // @[Monitor.scala:541:22] reg [2:0] sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] wire _b_first_T = io_in_b_ready_0 & io_in_b_valid_0; // @[Decoupled.scala:51:35] wire b_first_done = _b_first_T; // @[Decoupled.scala:51:35] reg [2:0] b_first_counter; // @[Edges.scala:229:27] wire [3:0] _b_first_counter1_T = {1'h0, b_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] b_first_counter1 = _b_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire b_first = b_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _b_first_last_T = b_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire [2:0] _b_first_count_T = ~b_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] _b_first_counter_T = b_first ? 3'h0 : b_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [1:0] param_2; // @[Monitor.scala:411:22] reg [5:0] source_2; // @[Monitor.scala:413:22] reg [31:0] address_1; // @[Monitor.scala:414:22] wire _T_2213 = io_in_c_ready_0 & io_in_c_valid_0; // @[Decoupled.scala:51:35] wire _c_first_T; // @[Decoupled.scala:51:35] assign _c_first_T = _T_2213; // @[Decoupled.scala:51:35] wire _c_first_T_1; // @[Decoupled.scala:51:35] assign _c_first_T_1 = _T_2213; // @[Decoupled.scala:51:35] wire [5:0] _c_first_beats1_decode_T_1 = _c_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _c_first_beats1_decode_T_2 = ~_c_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] c_first_beats1_decode = _c_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire c_first_beats1_opdata = io_in_c_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire c_first_beats1_opdata_1 = io_in_c_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [2:0] c_first_beats1 = c_first_beats1_opdata ? 
c_first_beats1_decode : 3'h0; // @[Edges.scala:102:36, :220:59, :221:14] reg [2:0] c_first_counter; // @[Edges.scala:229:27] wire [3:0] _c_first_counter1_T = {1'h0, c_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] c_first_counter1 = _c_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire c_first = c_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _c_first_last_T = c_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _c_first_last_T_1 = c_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire c_first_last = _c_first_last_T | _c_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire c_first_done = c_first_last & _c_first_T; // @[Decoupled.scala:51:35] wire [2:0] _c_first_count_T = ~c_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] c_first_count = c_first_beats1 & _c_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _c_first_counter_T = c_first ? c_first_beats1 : c_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_3; // @[Monitor.scala:515:22] reg [2:0] param_3; // @[Monitor.scala:516:22] reg [2:0] size_3; // @[Monitor.scala:517:22] reg [5:0] source_3; // @[Monitor.scala:518:22] reg [31:0] address_2; // @[Monitor.scala:519:22] reg [38:0] inflight; // @[Monitor.scala:614:27] reg [155:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [155:0] inflight_sizes; // @[Monitor.scala:618:33] wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [38:0] a_set; // @[Monitor.scala:626:34] wire [38:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [155:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [155:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [8:0] _GEN_4 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [8:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_4; // @[Monitor.scala:637:69] wire [8:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_4; // @[Monitor.scala:637:69, :641:65] wire [8:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_4; // @[Monitor.scala:637:69, :680:101] wire [8:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_4; // @[Monitor.scala:637:69, :681:99] wire [8:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_4; // @[Monitor.scala:637:69, :749:69] wire [8:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_4; // @[Monitor.scala:637:69, :750:67] wire [8:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_4; // @[Monitor.scala:637:69, :790:101] wire [8:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_4; // @[Monitor.scala:637:69, :791:99] wire [155:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [155:0] _a_opcode_lookup_T_6 = {152'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [155:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[155:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [155:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [155:0] _a_size_lookup_T_6 = {152'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [155:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[155:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire 
[63:0] _GEN_5 = 64'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [63:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [63:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_5; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[38:0] : 39'h0; // @[OneHot.scala:58:35] wire _T_2068 = _T_2142 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_2068 ? _a_set_T[38:0] : 39'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_2068 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_2068 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [8:0] _GEN_6 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [8:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_6; // @[Monitor.scala:659:79] wire [8:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_6; // @[Monitor.scala:659:79, :660:77] wire [514:0] _a_opcodes_set_T_1 = {511'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_2068 ? _a_opcodes_set_T_1[155:0] : 156'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [514:0] _a_sizes_set_T_1 = {511'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_2068 ? _a_sizes_set_T_1[155:0] : 156'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [38:0] d_clr; // @[Monitor.scala:664:34] wire [38:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [155:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [155:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_7 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_7; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_7; // @[Monitor.scala:673:46, :783:46] wire _T_2114 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [63:0] _GEN_8 = 64'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [63:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_8; // @[OneHot.scala:58:35] wire [63:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_8; // @[OneHot.scala:58:35] wire [63:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_8; // @[OneHot.scala:58:35] wire [63:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_8; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_2114 & ~d_release_ack ? _d_clr_wo_ready_T[38:0] : 39'h0; // @[OneHot.scala:58:35] wire _T_2083 = _T_2216 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_2083 ? _d_clr_T[38:0] : 39'h0; // @[OneHot.scala:58:35] wire [526:0] _d_opcodes_clr_T_5 = 527'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_2083 ? 
_d_opcodes_clr_T_5[155:0] : 156'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [526:0] _d_sizes_clr_T_5 = 527'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_2083 ? _d_sizes_clr_T_5[155:0] : 156'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [38:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [38:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [38:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [155:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [155:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [155:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [155:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [155:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [155:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [38:0] inflight_1; // @[Monitor.scala:726:35] reg [155:0] inflight_opcodes_1; // @[Monitor.scala:727:35] reg [155:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [5:0] _c_first_beats1_decode_T_4 = _c_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _c_first_beats1_decode_T_5 = ~_c_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] c_first_beats1_decode_1 = _c_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire [2:0] c_first_beats1_1 = c_first_beats1_opdata_1 ? c_first_beats1_decode_1 : 3'h0; // @[Edges.scala:102:36, :220:59, :221:14] reg [2:0] c_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _c_first_counter1_T_1 = {1'h0, c_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] c_first_counter1_1 = _c_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire c_first_1 = c_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _c_first_last_T_2 = c_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _c_first_last_T_3 = c_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire c_first_last_1 = _c_first_last_T_2 | _c_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire c_first_done_1 = c_first_last_1 & _c_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _c_first_count_T_1 = ~c_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] c_first_count_1 = c_first_beats1_1 & _c_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _c_first_counter_T_1 = c_first_1 ? 
c_first_beats1_1 : c_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [38:0] c_set; // @[Monitor.scala:738:34] wire [38:0] c_set_wo_ready; // @[Monitor.scala:739:34] wire [155:0] c_opcodes_set; // @[Monitor.scala:740:34] wire [155:0] c_sizes_set; // @[Monitor.scala:741:34] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [155:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [155:0] _c_opcode_lookup_T_6 = {152'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [155:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[155:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [155:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [155:0] _c_size_lookup_T_6 = {152'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [155:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[155:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [3:0] c_opcodes_set_interm; // @[Monitor.scala:754:40] wire [3:0] c_sizes_set_interm; // @[Monitor.scala:755:40] wire _same_cycle_resp_T_3 = io_in_c_valid_0 & c_first_1; // @[Monitor.scala:36:7, :759:26, :795:44] wire _same_cycle_resp_T_4 = io_in_c_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _same_cycle_resp_T_5 = io_in_c_bits_opcode_0[1]; // @[Monitor.scala:36:7] wire [63:0] _GEN_9 = 64'h1 << io_in_c_bits_source_0; // @[OneHot.scala:58:35] wire [63:0] _c_set_wo_ready_T; // @[OneHot.scala:58:35] assign _c_set_wo_ready_T = _GEN_9; // @[OneHot.scala:58:35] wire [63:0] _c_set_T; // @[OneHot.scala:58:35] assign _c_set_T = _GEN_9; // @[OneHot.scala:58:35] assign c_set_wo_ready = _same_cycle_resp_T_3 & _same_cycle_resp_T_4 & _same_cycle_resp_T_5 ? 
_c_set_wo_ready_T[38:0] : 39'h0; // @[OneHot.scala:58:35] wire _T_2155 = _T_2213 & c_first_1 & _same_cycle_resp_T_4 & _same_cycle_resp_T_5; // @[Decoupled.scala:51:35] assign c_set = _T_2155 ? _c_set_T[38:0] : 39'h0; // @[OneHot.scala:58:35] wire [3:0] _c_opcodes_set_interm_T = {io_in_c_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :765:53] wire [3:0] _c_opcodes_set_interm_T_1 = {_c_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:765:{53,61}] assign c_opcodes_set_interm = _T_2155 ? _c_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:754:40, :763:{25,36,70}, :765:{28,61}] wire [3:0] _c_sizes_set_interm_T = {io_in_c_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :766:51] wire [3:0] _c_sizes_set_interm_T_1 = {_c_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:766:{51,59}] assign c_sizes_set_interm = _T_2155 ? _c_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:755:40, :763:{25,36,70}, :766:{28,59}] wire [8:0] _GEN_10 = {1'h0, io_in_c_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :767:79] wire [8:0] _c_opcodes_set_T; // @[Monitor.scala:767:79] assign _c_opcodes_set_T = _GEN_10; // @[Monitor.scala:767:79] wire [8:0] _c_sizes_set_T; // @[Monitor.scala:768:77] assign _c_sizes_set_T = _GEN_10; // @[Monitor.scala:767:79, :768:77] wire [514:0] _c_opcodes_set_T_1 = {511'h0, c_opcodes_set_interm} << _c_opcodes_set_T; // @[Monitor.scala:659:54, :754:40, :767:{54,79}] assign c_opcodes_set = _T_2155 ? _c_opcodes_set_T_1[155:0] : 156'h0; // @[Monitor.scala:740:34, :763:{25,36,70}, :767:{28,54}] wire [514:0] _c_sizes_set_T_1 = {511'h0, c_sizes_set_interm} << _c_sizes_set_T; // @[Monitor.scala:659:54, :755:40, :768:{52,77}] assign c_sizes_set = _T_2155 ? _c_sizes_set_T_1[155:0] : 156'h0; // @[Monitor.scala:741:34, :763:{25,36,70}, :768:{28,52}] wire _c_probe_ack_T = io_in_c_bits_opcode_0 == 3'h4; // @[Monitor.scala:36:7, :772:47] wire _c_probe_ack_T_1 = io_in_c_bits_opcode_0 == 3'h5; // @[Monitor.scala:36:7, :772:95] wire c_probe_ack = _c_probe_ack_T | _c_probe_ack_T_1; // @[Monitor.scala:772:{47,71,95}] wire [38:0] d_clr_1; // @[Monitor.scala:774:34] wire [38:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [155:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [155:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_2186 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_2186 & d_release_ack_1 ? _d_clr_wo_ready_T_1[38:0] : 39'h0; // @[OneHot.scala:58:35] wire _T_2168 = _T_2216 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_2168 ? _d_clr_T_1[38:0] : 39'h0; // @[OneHot.scala:58:35] wire [526:0] _d_opcodes_clr_T_11 = 527'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_2168 ? _d_opcodes_clr_T_11[155:0] : 156'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [526:0] _d_sizes_clr_T_11 = 527'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_2168 ? 
_d_sizes_clr_T_11[155:0] : 156'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_6 = _same_cycle_resp_T_4 & _same_cycle_resp_T_5; // @[Edges.scala:68:{36,40,51}] wire _same_cycle_resp_T_7 = _same_cycle_resp_T_3 & _same_cycle_resp_T_6; // @[Monitor.scala:795:{44,55}] wire _same_cycle_resp_T_8 = io_in_c_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :795:113] wire same_cycle_resp_1 = _same_cycle_resp_T_7 & _same_cycle_resp_T_8; // @[Monitor.scala:795:{55,88,113}] wire [38:0] _inflight_T_3 = inflight_1 | c_set; // @[Monitor.scala:726:35, :738:34, :814:35] wire [38:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [38:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [155:0] _inflight_opcodes_T_3 = inflight_opcodes_1 | c_opcodes_set; // @[Monitor.scala:727:35, :740:34, :815:43] wire [155:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [155:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [155:0] _inflight_sizes_T_3 = inflight_sizes_1 | c_sizes_set; // @[Monitor.scala:728:35, :741:34, :816:41] wire [155:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [155:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27] wire [32:0] _watchdog_T_2 = {1'h0, watchdog_1} + 33'h1; // @[Monitor.scala:818:27, :823:26] wire [31:0] _watchdog_T_3 = _watchdog_T_2[31:0]; // @[Monitor.scala:823:26] reg [6:0] inflight_2; // @[Monitor.scala:828:27] wire [5:0] _d_first_beats1_decode_T_10 = _d_first_beats1_decode_T_9[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_11 = ~_d_first_beats1_decode_T_10; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_3 = _d_first_beats1_decode_T_11[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_3 = d_first_beats1_opdata_3 ? d_first_beats1_decode_3 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_3; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_3 = {1'h0, d_first_counter_3} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_3 = _d_first_counter1_T_3[2:0]; // @[Edges.scala:230:28] wire d_first_3 = d_first_counter_3 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_6 = d_first_counter_3 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_7 = d_first_beats1_3 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_3 = _d_first_last_T_6 | _d_first_last_T_7; // @[Edges.scala:232:{25,33,43}] wire d_first_done_3 = d_first_last_3 & _d_first_T_3; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_3 = ~d_first_counter1_3; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_3 = d_first_beats1_3 & _d_first_count_T_3; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_3 = d_first_3 ? d_first_beats1_3 : d_first_counter1_3; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [6:0] d_set; // @[Monitor.scala:833:25] wire _T_2222 = _T_2216 & d_first_3 & io_in_d_bits_opcode_0[2] & ~(io_in_d_bits_opcode_0[1]); // @[Decoupled.scala:51:35] wire [7:0] _d_set_T = 8'h1 << io_in_d_bits_sink_0; // @[OneHot.scala:58:35] assign d_set = _T_2222 ? 
_d_set_T[6:0] : 7'h0; // @[OneHot.scala:58:35] wire [6:0] e_clr; // @[Monitor.scala:839:25] wire [7:0] _e_clr_T = 8'h1 << io_in_e_bits_sink_0; // @[OneHot.scala:58:35] assign e_clr = io_in_e_valid_0 ? _e_clr_T[6:0] : 7'h0; // @[OneHot.scala:58:35]
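The tail of this generated monitor tracks which TileLink sink IDs have a Grant outstanding and still awaiting a GrantAck: d_set one-hot-sets a bit when a first Grant beat is accepted on D (the 8'h1 << io_in_d_bits_sink_0 shift), and e_clr one-hot-clears it when the matching E beat arrives. Below is a minimal, self-contained Chisel sketch of that set/clear bookkeeping, assuming a small sink-id space; the module and signal names (SinkTracker, d_fire, e_fire) are illustrative and do not appear in the generated monitor, and the monitor's surrounding assertions over these vectors are omitted.

import chisel3._
import chisel3.util._

// Illustrative sketch only, not the generated monitor itself.
// Tracks outstanding Grants per sink ID with the same one-hot set/clear
// pattern as the d_set / e_clr wires in the netlist above.
class SinkTracker(nSinks: Int) extends Module {
  val io = IO(new Bundle {
    val d_fire   = Input(Bool())                    // first Grant beat accepted on D
    val d_sink   = Input(UInt(log2Ceil(nSinks).W))
    val e_fire   = Input(Bool())                    // GrantAck accepted on E
    val e_sink   = Input(UInt(log2Ceil(nSinks).W))
    val inflight = Output(UInt(nSinks.W))
  })
  val busy = RegInit(0.U(nSinks.W))
  // One-hot encodings, equivalent to the 1 << sink shifts in the Verilog.
  val d_set = Mux(io.d_fire, UIntToOH(io.d_sink, nSinks), 0.U(nSinks.W))
  val e_clr = Mux(io.e_fire, UIntToOH(io.e_sink, nSinks), 0.U(nSinks.W))
  busy := (busy | d_set) & ~e_clr
  io.inflight := busy
}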
Generate the Verilog code corresponding to the following Chisel files. File Switch.scala: package constellation.router import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.util._ import constellation.channel.{ChannelParams, IngressChannelParams, EgressChannelParams, Flit} class SwitchBundle(val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle with HasRouterOutputParams{ val flit = new Flit(allOutParams(0).payloadBits) val out_virt_channel = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W) } class Switch( val routerParams: RouterParams, val inParams: Seq[ChannelParams], val outParams: Seq[ChannelParams], val ingressParams: Seq[IngressChannelParams], val egressParams: Seq[EgressChannelParams] )(implicit val p: Parameters) extends Module with HasRouterParams with HasRouterInputParams with HasRouterOutputParams { val io = IO(new Bundle { val in = MixedVec(allInParams.map { u => Vec(u.destSpeedup, Input(Valid(new SwitchBundle(outParams, egressParams)))) }) val out = MixedVec(allOutParams.map { u => Vec(u.srcSpeedup, Output(Valid(new Flit(u.payloadBits)))) }) val sel = MixedVec(allOutParams.map { o => Vec(o.srcSpeedup, MixedVec(allInParams.map { i => Vec(i.destSpeedup, Input(Bool())) })) }) }) val in_flat = Wire(Vec(allInParams.map(_.destSpeedup).reduce(_+_), Valid(new SwitchBundle(outParams, egressParams)))) var idx = 0 io.in.foreach(_.foreach { i => in_flat(idx) := i idx += 1 }) for (i <- 0 until nAllOutputs) { for (j <- 0 until allOutParams(i).srcSpeedup) { val sel_flat = io.sel(i)(j).asUInt assert(PopCount(sel_flat) <= 1.U) io.out(i)(j).valid := Mux1H(sel_flat, in_flat.map(_.valid)) && sel_flat =/= 0.U io.out(i)(j).bits := Mux1H(sel_flat, in_flat.map(_.bits.flit)) io.out(i)(j).bits.virt_channel_id := Mux1H(sel_flat, in_flat.map(_.bits.out_virt_channel)) } } }
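The heart of the Switch above is Mux1H driven by a select vector that the PopCount assertion guarantees is one-hot or zero. The following self-contained sketch isolates that idiom for a single output lane; OneHotXbarLane is a hypothetical name used only for illustration and is not part of Switch.scala.

import chisel3._
import chisel3.util._

// Illustrative sketch of the one-hot crossbar-lane idiom used by Switch:
// at most one select bit may be high, and the matching input is forwarded.
class OneHotXbarLane(nIn: Int, w: Int) extends Module {
  val io = IO(new Bundle {
    val sel = Input(Vec(nIn, Bool()))            // like one io.sel(i)(j) slice of Switch
    val in  = Input(Vec(nIn, Valid(UInt(w.W))))
    val out = Output(Valid(UInt(w.W)))
  })
  val selUInt = io.sel.asUInt
  assert(PopCount(selUInt) <= 1.U)               // same guarantee Switch asserts per output
  // Mux1H is undefined when no select bit is set, so valid is additionally
  // gated on selUInt =/= 0.U, exactly as in the Switch body.
  io.out.valid := Mux1H(selUInt, io.in.map(_.valid)) && selUInt =/= 0.U
  io.out.bits  := Mux1H(selUInt, io.in.map(_.bits))
}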
module Switch_15( // @[Switch.scala:16:7] input clock, // @[Switch.scala:16:7] input reset, // @[Switch.scala:16:7] input io_in_3_0_valid, // @[Switch.scala:27:14] input io_in_3_0_bits_flit_head, // @[Switch.scala:27:14] input io_in_3_0_bits_flit_tail, // @[Switch.scala:27:14] input [72:0] io_in_3_0_bits_flit_payload, // @[Switch.scala:27:14] input [2:0] io_in_3_0_bits_flit_flow_vnet_id, // @[Switch.scala:27:14] input [4:0] io_in_3_0_bits_flit_flow_ingress_node, // @[Switch.scala:27:14] input [1:0] io_in_3_0_bits_flit_flow_ingress_node_id, // @[Switch.scala:27:14] input [4:0] io_in_3_0_bits_flit_flow_egress_node, // @[Switch.scala:27:14] input [1:0] io_in_3_0_bits_flit_flow_egress_node_id, // @[Switch.scala:27:14] input [2:0] io_in_3_0_bits_out_virt_channel, // @[Switch.scala:27:14] input io_in_2_0_valid, // @[Switch.scala:27:14] input io_in_2_0_bits_flit_head, // @[Switch.scala:27:14] input io_in_2_0_bits_flit_tail, // @[Switch.scala:27:14] input [72:0] io_in_2_0_bits_flit_payload, // @[Switch.scala:27:14] input [2:0] io_in_2_0_bits_flit_flow_vnet_id, // @[Switch.scala:27:14] input [4:0] io_in_2_0_bits_flit_flow_ingress_node, // @[Switch.scala:27:14] input [1:0] io_in_2_0_bits_flit_flow_ingress_node_id, // @[Switch.scala:27:14] input [4:0] io_in_2_0_bits_flit_flow_egress_node, // @[Switch.scala:27:14] input [1:0] io_in_2_0_bits_flit_flow_egress_node_id, // @[Switch.scala:27:14] input [2:0] io_in_2_0_bits_out_virt_channel, // @[Switch.scala:27:14] input io_in_1_0_valid, // @[Switch.scala:27:14] input io_in_1_0_bits_flit_head, // @[Switch.scala:27:14] input io_in_1_0_bits_flit_tail, // @[Switch.scala:27:14] input [72:0] io_in_1_0_bits_flit_payload, // @[Switch.scala:27:14] input [2:0] io_in_1_0_bits_flit_flow_vnet_id, // @[Switch.scala:27:14] input [4:0] io_in_1_0_bits_flit_flow_ingress_node, // @[Switch.scala:27:14] input [1:0] io_in_1_0_bits_flit_flow_ingress_node_id, // @[Switch.scala:27:14] input [4:0] io_in_1_0_bits_flit_flow_egress_node, // @[Switch.scala:27:14] input [1:0] io_in_1_0_bits_flit_flow_egress_node_id, // @[Switch.scala:27:14] input [2:0] io_in_1_0_bits_out_virt_channel, // @[Switch.scala:27:14] input io_in_0_0_valid, // @[Switch.scala:27:14] input io_in_0_0_bits_flit_head, // @[Switch.scala:27:14] input io_in_0_0_bits_flit_tail, // @[Switch.scala:27:14] input [72:0] io_in_0_0_bits_flit_payload, // @[Switch.scala:27:14] input [2:0] io_in_0_0_bits_flit_flow_vnet_id, // @[Switch.scala:27:14] input [4:0] io_in_0_0_bits_flit_flow_ingress_node, // @[Switch.scala:27:14] input [1:0] io_in_0_0_bits_flit_flow_ingress_node_id, // @[Switch.scala:27:14] input [4:0] io_in_0_0_bits_flit_flow_egress_node, // @[Switch.scala:27:14] input [1:0] io_in_0_0_bits_flit_flow_egress_node_id, // @[Switch.scala:27:14] input [2:0] io_in_0_0_bits_out_virt_channel, // @[Switch.scala:27:14] output io_out_3_0_valid, // @[Switch.scala:27:14] output io_out_3_0_bits_head, // @[Switch.scala:27:14] output io_out_3_0_bits_tail, // @[Switch.scala:27:14] output [72:0] io_out_3_0_bits_payload, // @[Switch.scala:27:14] output [2:0] io_out_3_0_bits_flow_vnet_id, // @[Switch.scala:27:14] output [4:0] io_out_3_0_bits_flow_ingress_node, // @[Switch.scala:27:14] output [1:0] io_out_3_0_bits_flow_ingress_node_id, // @[Switch.scala:27:14] output [4:0] io_out_3_0_bits_flow_egress_node, // @[Switch.scala:27:14] output [1:0] io_out_3_0_bits_flow_egress_node_id, // @[Switch.scala:27:14] output [2:0] io_out_3_0_bits_virt_channel_id, // @[Switch.scala:27:14] output io_out_2_0_valid, // @[Switch.scala:27:14] output 
io_out_2_0_bits_head, // @[Switch.scala:27:14] output io_out_2_0_bits_tail, // @[Switch.scala:27:14] output [72:0] io_out_2_0_bits_payload, // @[Switch.scala:27:14] output [2:0] io_out_2_0_bits_flow_vnet_id, // @[Switch.scala:27:14] output [4:0] io_out_2_0_bits_flow_ingress_node, // @[Switch.scala:27:14] output [1:0] io_out_2_0_bits_flow_ingress_node_id, // @[Switch.scala:27:14] output [4:0] io_out_2_0_bits_flow_egress_node, // @[Switch.scala:27:14] output [1:0] io_out_2_0_bits_flow_egress_node_id, // @[Switch.scala:27:14] output [2:0] io_out_2_0_bits_virt_channel_id, // @[Switch.scala:27:14] output io_out_1_0_valid, // @[Switch.scala:27:14] output io_out_1_0_bits_head, // @[Switch.scala:27:14] output io_out_1_0_bits_tail, // @[Switch.scala:27:14] output [72:0] io_out_1_0_bits_payload, // @[Switch.scala:27:14] output [2:0] io_out_1_0_bits_flow_vnet_id, // @[Switch.scala:27:14] output [4:0] io_out_1_0_bits_flow_ingress_node, // @[Switch.scala:27:14] output [1:0] io_out_1_0_bits_flow_ingress_node_id, // @[Switch.scala:27:14] output [4:0] io_out_1_0_bits_flow_egress_node, // @[Switch.scala:27:14] output [1:0] io_out_1_0_bits_flow_egress_node_id, // @[Switch.scala:27:14] output [2:0] io_out_1_0_bits_virt_channel_id, // @[Switch.scala:27:14] output io_out_0_0_valid, // @[Switch.scala:27:14] output io_out_0_0_bits_head, // @[Switch.scala:27:14] output io_out_0_0_bits_tail, // @[Switch.scala:27:14] output [72:0] io_out_0_0_bits_payload, // @[Switch.scala:27:14] output [2:0] io_out_0_0_bits_flow_vnet_id, // @[Switch.scala:27:14] output [4:0] io_out_0_0_bits_flow_ingress_node, // @[Switch.scala:27:14] output [1:0] io_out_0_0_bits_flow_ingress_node_id, // @[Switch.scala:27:14] output [4:0] io_out_0_0_bits_flow_egress_node, // @[Switch.scala:27:14] output [1:0] io_out_0_0_bits_flow_egress_node_id, // @[Switch.scala:27:14] output [2:0] io_out_0_0_bits_virt_channel_id, // @[Switch.scala:27:14] input io_sel_3_0_3_0, // @[Switch.scala:27:14] input io_sel_3_0_2_0, // @[Switch.scala:27:14] input io_sel_3_0_1_0, // @[Switch.scala:27:14] input io_sel_3_0_0_0, // @[Switch.scala:27:14] input io_sel_2_0_3_0, // @[Switch.scala:27:14] input io_sel_2_0_2_0, // @[Switch.scala:27:14] input io_sel_2_0_1_0, // @[Switch.scala:27:14] input io_sel_2_0_0_0, // @[Switch.scala:27:14] input io_sel_1_0_3_0, // @[Switch.scala:27:14] input io_sel_1_0_2_0, // @[Switch.scala:27:14] input io_sel_1_0_1_0, // @[Switch.scala:27:14] input io_sel_1_0_0_0, // @[Switch.scala:27:14] input io_sel_0_0_3_0, // @[Switch.scala:27:14] input io_sel_0_0_2_0, // @[Switch.scala:27:14] input io_sel_0_0_1_0, // @[Switch.scala:27:14] input io_sel_0_0_0_0 // @[Switch.scala:27:14] );
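  // Sketch of the selection logic implied by Switch.scala (not the exact emitted netlist): each
  // output port group is a one-hot AND-OR mux of the four inputs, qualified by the matching
  // io_sel_<out>_0_<in>_0 bit. For output 0, for example:
  //   assign io_out_0_0_valid = io_sel_0_0_0_0 & io_in_0_0_valid | io_sel_0_0_1_0 & io_in_1_0_valid
  //                           | io_sel_0_0_2_0 & io_in_2_0_valid | io_sel_0_0_3_0 & io_in_3_0_valid;
  //   assign io_out_0_0_bits_payload = {73{io_sel_0_0_0_0}} & io_in_0_0_bits_flit_payload
  //                                  | {73{io_sel_0_0_1_0}} & io_in_1_0_bits_flit_payload
  //                                  | {73{io_sel_0_0_2_0}} & io_in_2_0_bits_flit_payload
  //                                  | {73{io_sel_0_0_3_0}} & io_in_3_0_bits_flit_payload;
  // The remaining bits fields and outputs 1..3 follow the same pattern; io_out_*_bits_virt_channel_id
  // is driven from io_in_*_bits_out_virt_channel rather than from the flit bundle.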
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
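 * For example, a chisel3.util.Valid / ValidIO bundle (a `valid: Bool` alongside `bits`) satisfies this structural type.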
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File DCache.scala: // See LICENSE.SiFive for license details. 
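// Descriptive note (summary of the module below): DCacheModule implements Rocket's L1 data cache
// datapath: one outstanding cached miss (cached_grant_wait / refill_way), up to
// maxUncachedInFlight uncached/MMIO requests, a two-entry pending store buffer, optional ECC on
// tags and data, and a TileLink release/probe state machine on the B/C channels.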
package freechips.rocketchip.rocket import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.amba.AMBAProt import freechips.rocketchip.diplomacy.{BufferParams} import freechips.rocketchip.prci.{ClockCrossingType, RationalCrossing, SynchronousCrossing, AsynchronousCrossing, CreditedCrossing} import freechips.rocketchip.tile.{CoreBundle, LookupByHartId} import freechips.rocketchip.tilelink.{TLFIFOFixer,ClientMetadata, TLBundleA, TLAtomics, TLBundleB, TLPermissions} import freechips.rocketchip.tilelink.TLMessages.{AccessAck, HintAck, AccessAckData, Grant, GrantData, ReleaseAck} import freechips.rocketchip.util.{CanHaveErrors, ClockGate, IdentityCode, ReplacementPolicy, DescribedSRAM, property} import freechips.rocketchip.util.BooleanToAugmentedBoolean import freechips.rocketchip.util.UIntToAugmentedUInt import freechips.rocketchip.util.UIntIsOneOf import freechips.rocketchip.util.IntToAugmentedInt import freechips.rocketchip.util.SeqToAugmentedSeq import freechips.rocketchip.util.SeqBoolBitwiseOps // TODO: delete this trait once deduplication is smart enough to avoid globally inlining matching circuits trait InlineInstance { self: chisel3.experimental.BaseModule => chisel3.experimental.annotate( new chisel3.experimental.ChiselAnnotation { def toFirrtl: firrtl.annotations.Annotation = firrtl.passes.InlineAnnotation(self.toNamed) } ) } class DCacheErrors(implicit p: Parameters) extends L1HellaCacheBundle()(p) with CanHaveErrors { val correctable = (cacheParams.tagCode.canCorrect || cacheParams.dataCode.canCorrect).option(Valid(UInt(paddrBits.W))) val uncorrectable = (cacheParams.tagCode.canDetect || cacheParams.dataCode.canDetect).option(Valid(UInt(paddrBits.W))) val bus = Valid(UInt(paddrBits.W)) } class DCacheDataReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) { val addr = UInt(untagBits.W) val write = Bool() val wdata = UInt((encBits * rowBytes / eccBytes).W) val wordMask = UInt((rowBytes / subWordBytes).W) val eccMask = UInt((wordBytes / eccBytes).W) val way_en = UInt(nWays.W) } class DCacheDataArray(implicit p: Parameters) extends L1HellaCacheModule()(p) { val io = IO(new Bundle { val req = Flipped(Valid(new DCacheDataReq)) val resp = Output(Vec(nWays, UInt((req.bits.wdata.getWidth).W))) }) require(rowBits % subWordBits == 0, "rowBits must be a multiple of subWordBits") val eccMask = if (eccBits == subWordBits) Seq(true.B) else io.req.bits.eccMask.asBools val wMask = if (nWays == 1) eccMask else (0 until nWays).flatMap(i => eccMask.map(_ && io.req.bits.way_en(i))) val wWords = io.req.bits.wdata.grouped(encBits * (subWordBits / eccBits)) val addr = io.req.bits.addr >> rowOffBits val data_arrays = Seq.tabulate(rowBits / subWordBits) { i => DescribedSRAM( name = s"${tileParams.baseName}_dcache_data_arrays_${i}", desc = "DCache Data Array", size = nSets * cacheBlockBytes / rowBytes, data = Vec(nWays * (subWordBits / eccBits), UInt(encBits.W)) ) } val rdata = for ((array , i) <- data_arrays.zipWithIndex) yield { val valid = io.req.valid && ((data_arrays.size == 1).B || io.req.bits.wordMask(i)) when (valid && io.req.bits.write) { val wMaskSlice = (0 until wMask.size).filter(j => i % (wordBits/subWordBits) == (j % (wordBytes/eccBytes)) / (subWordBytes/eccBytes)).map(wMask(_)) val wData = wWords(i).grouped(encBits) array.write(addr, VecInit((0 until nWays).flatMap(i => wData)), wMaskSlice) } val data = array.read(addr, valid && !io.req.bits.write) data.grouped(subWordBits / 
eccBits).map(_.asUInt).toSeq } (io.resp zip rdata.transpose).foreach { case (resp, data) => resp := data.asUInt } } class DCacheMetadataReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) { val write = Bool() val addr = UInt(vaddrBitsExtended.W) val idx = UInt(idxBits.W) val way_en = UInt(nWays.W) val data = UInt(cacheParams.tagCode.width(new L1Metadata().getWidth).W) } class DCache(staticIdForMetadataUseOnly: Int, val crossing: ClockCrossingType)(implicit p: Parameters) extends HellaCache(staticIdForMetadataUseOnly)(p) { override lazy val module = new DCacheModule(this) } class DCacheTLBPort(implicit p: Parameters) extends CoreBundle()(p) { val req = Flipped(Decoupled(new TLBReq(coreDataBytes.log2))) val s1_resp = Output(new TLBResp(coreDataBytes.log2)) val s2_kill = Input(Bool()) } class DCacheModule(outer: DCache) extends HellaCacheModule(outer) { val tECC = cacheParams.tagCode val dECC = cacheParams.dataCode require(subWordBits % eccBits == 0, "subWordBits must be a multiple of eccBits") require(eccBytes == 1 || !dECC.isInstanceOf[IdentityCode]) require(cacheParams.silentDrop || cacheParams.acquireBeforeRelease, "!silentDrop requires acquireBeforeRelease") val usingRMW = eccBytes > 1 || usingAtomicsInCache val mmioOffset = outer.firstMMIO edge.manager.requireFifo(TLFIFOFixer.allVolatile) // TileLink pipelining MMIO requests val clock_en_reg = Reg(Bool()) io.cpu.clock_enabled := clock_en_reg val gated_clock = if (!cacheParams.clockGate) clock else ClockGate(clock, clock_en_reg, "dcache_clock_gate") class DCacheModuleImpl { // entering gated-clock domain val tlb = Module(new TLB(false, log2Ceil(coreDataBytes), TLBConfig(nTLBSets, nTLBWays, cacheParams.nTLBBasePageSectors, cacheParams.nTLBSuperpages))) val pma_checker = Module(new TLB(false, log2Ceil(coreDataBytes), TLBConfig(nTLBSets, nTLBWays, cacheParams.nTLBBasePageSectors, cacheParams.nTLBSuperpages)) with InlineInstance) // tags val replacer = ReplacementPolicy.fromString(cacheParams.replacementPolicy, nWays) /** Metadata Arbiter: * 0: Tag update on reset * 1: Tag update on ECC error * 2: Tag update on hit * 3: Tag update on refill * 4: Tag update on release * 5: Tag update on flush * 6: Tag update on probe * 7: Tag update on CPU request */ val metaArb = Module(new Arbiter(new DCacheMetadataReq, 8) with InlineInstance) val tag_array = DescribedSRAM( name = s"${tileParams.baseName}_dcache_tag_array", desc = "DCache Tag Array", size = nSets, data = Vec(nWays, chiselTypeOf(metaArb.io.out.bits.data)) ) // data val data = Module(new DCacheDataArray) /** Data Arbiter * 0: data from pending store buffer * 1: data from TL-D refill * 2: release to TL-A * 3: hit path to CPU */ val dataArb = Module(new Arbiter(new DCacheDataReq, 4) with InlineInstance) dataArb.io.in.tail.foreach(_.bits.wdata := dataArb.io.in.head.bits.wdata) // tie off write ports by default data.io.req.bits <> dataArb.io.out.bits data.io.req.valid := dataArb.io.out.valid dataArb.io.out.ready := true.B metaArb.io.out.ready := clock_en_reg val tl_out_a = Wire(chiselTypeOf(tl_out.a)) tl_out.a <> { val a_queue_depth = outer.crossing match { case RationalCrossing(_) => // TODO make this depend on the actual ratio? 
if (cacheParams.separateUncachedResp) (maxUncachedInFlight + 1) / 2 else 2 min maxUncachedInFlight-1 case SynchronousCrossing(BufferParams.none) => 1 // Need some buffering to guarantee livelock freedom case SynchronousCrossing(_) => 0 // Adequate buffering within the crossing case _: AsynchronousCrossing => 0 // Adequate buffering within the crossing case _: CreditedCrossing => 0 // Adequate buffering within the crossing } Queue(tl_out_a, a_queue_depth, flow = true) } val (tl_out_c, release_queue_empty) = if (cacheParams.acquireBeforeRelease) { val q = Module(new Queue(chiselTypeOf(tl_out.c.bits), cacheDataBeats, flow = true)) tl_out.c <> q.io.deq (q.io.enq, q.io.count === 0.U) } else { (tl_out.c, true.B) } val s1_valid = RegNext(io.cpu.req.fire, false.B) val s1_probe = RegNext(tl_out.b.fire, false.B) val probe_bits = RegEnable(tl_out.b.bits, tl_out.b.fire) // TODO has data now :( val s1_nack = WireDefault(false.B) val s1_valid_masked = s1_valid && !io.cpu.s1_kill val s1_valid_not_nacked = s1_valid && !s1_nack val s1_tlb_req_valid = RegNext(io.tlb_port.req.fire, false.B) val s2_tlb_req_valid = RegNext(s1_tlb_req_valid, false.B) val s0_clk_en = metaArb.io.out.valid && !metaArb.io.out.bits.write val s0_req = WireInit(io.cpu.req.bits) s0_req.addr := Cat(metaArb.io.out.bits.addr >> blockOffBits, io.cpu.req.bits.addr(blockOffBits-1,0)) s0_req.idx.foreach(_ := Cat(metaArb.io.out.bits.idx, s0_req.addr(blockOffBits-1, 0))) when (!metaArb.io.in(7).ready) { s0_req.phys := true.B } val s1_req = RegEnable(s0_req, s0_clk_en) val s1_vaddr = Cat(s1_req.idx.getOrElse(s1_req.addr) >> tagLSB, s1_req.addr(tagLSB-1, 0)) val s0_tlb_req = WireInit(io.tlb_port.req.bits) when (!io.tlb_port.req.fire) { s0_tlb_req.passthrough := s0_req.phys s0_tlb_req.vaddr := s0_req.addr s0_tlb_req.size := s0_req.size s0_tlb_req.cmd := s0_req.cmd s0_tlb_req.prv := s0_req.dprv s0_tlb_req.v := s0_req.dv } val s1_tlb_req = RegEnable(s0_tlb_req, s0_clk_en || io.tlb_port.req.valid) val s1_read = isRead(s1_req.cmd) val s1_write = isWrite(s1_req.cmd) val s1_readwrite = s1_read || s1_write val s1_sfence = s1_req.cmd === M_SFENCE || s1_req.cmd === M_HFENCEV || s1_req.cmd === M_HFENCEG val s1_flush_line = s1_req.cmd === M_FLUSH_ALL && s1_req.size(0) val s1_flush_valid = Reg(Bool()) val s1_waw_hazard = Wire(Bool()) val s_ready :: s_voluntary_writeback :: s_probe_rep_dirty :: s_probe_rep_clean :: s_probe_retry :: s_probe_rep_miss :: s_voluntary_write_meta :: s_probe_write_meta :: s_dummy :: s_voluntary_release :: Nil = Enum(10) val supports_flush = outer.flushOnFenceI || coreParams.haveCFlush val flushed = RegInit(true.B) val flushing = RegInit(false.B) val flushing_req = Reg(chiselTypeOf(s1_req)) val cached_grant_wait = RegInit(false.B) val resetting = RegInit(false.B) val flushCounter = RegInit((nSets * (nWays-1)).U(log2Ceil(nSets * nWays).W)) val release_ack_wait = RegInit(false.B) val release_ack_addr = Reg(UInt(paddrBits.W)) val release_state = RegInit(s_ready) val refill_way = Reg(UInt()) val any_pstore_valid = Wire(Bool()) val inWriteback = release_state.isOneOf(s_voluntary_writeback, s_probe_rep_dirty) val releaseWay = Wire(UInt()) io.cpu.req.ready := (release_state === s_ready) && !cached_grant_wait && !s1_nack // I/O MSHRs val uncachedInFlight = RegInit(VecInit(Seq.fill(maxUncachedInFlight)(false.B))) val uncachedReqs = Reg(Vec(maxUncachedInFlight, new HellaCacheReq)) val uncachedResp = WireInit(new HellaCacheReq, DontCare) // hit initiation path val s0_read = isRead(io.cpu.req.bits.cmd) dataArb.io.in(3).valid := io.cpu.req.valid 
&& likelyNeedsRead(io.cpu.req.bits) dataArb.io.in(3).bits := dataArb.io.in(1).bits dataArb.io.in(3).bits.write := false.B dataArb.io.in(3).bits.addr := Cat(io.cpu.req.bits.idx.getOrElse(io.cpu.req.bits.addr) >> tagLSB, io.cpu.req.bits.addr(tagLSB-1, 0)) dataArb.io.in(3).bits.wordMask := { val mask = (subWordBytes.log2 until rowOffBits).foldLeft(1.U) { case (in, i) => val upper_mask = Mux((i >= wordBytes.log2).B || io.cpu.req.bits.size <= i.U, 0.U, ((BigInt(1) << (1 << (i - subWordBytes.log2)))-1).U) val upper = Mux(io.cpu.req.bits.addr(i), in, 0.U) | upper_mask val lower = Mux(io.cpu.req.bits.addr(i), 0.U, in) upper ## lower } Fill(subWordBytes / eccBytes, mask) } dataArb.io.in(3).bits.eccMask := ~0.U((wordBytes / eccBytes).W) dataArb.io.in(3).bits.way_en := ~0.U(nWays.W) when (!dataArb.io.in(3).ready && s0_read) { io.cpu.req.ready := false.B } val s1_did_read = RegEnable(dataArb.io.in(3).ready && (io.cpu.req.valid && needsRead(io.cpu.req.bits)), s0_clk_en) val s1_read_mask = RegEnable(dataArb.io.in(3).bits.wordMask, s0_clk_en) metaArb.io.in(7).valid := io.cpu.req.valid metaArb.io.in(7).bits.write := false.B metaArb.io.in(7).bits.idx := dataArb.io.in(3).bits.addr(idxMSB, idxLSB) metaArb.io.in(7).bits.addr := io.cpu.req.bits.addr metaArb.io.in(7).bits.way_en := metaArb.io.in(4).bits.way_en metaArb.io.in(7).bits.data := metaArb.io.in(4).bits.data when (!metaArb.io.in(7).ready) { io.cpu.req.ready := false.B } // address translation val s1_cmd_uses_tlb = s1_readwrite || s1_flush_line || s1_req.cmd === M_WOK io.ptw <> tlb.io.ptw tlb.io.kill := io.cpu.s2_kill || s2_tlb_req_valid && io.tlb_port.s2_kill tlb.io.req.valid := s1_tlb_req_valid || s1_valid && !io.cpu.s1_kill && s1_cmd_uses_tlb tlb.io.req.bits := s1_tlb_req when (!tlb.io.req.ready && !tlb.io.ptw.resp.valid && !io.cpu.req.bits.phys) { io.cpu.req.ready := false.B } when (!s1_tlb_req_valid && s1_valid && s1_cmd_uses_tlb && tlb.io.resp.miss) { s1_nack := true.B } tlb.io.sfence.valid := s1_valid && !io.cpu.s1_kill && s1_sfence tlb.io.sfence.bits.rs1 := s1_req.size(0) tlb.io.sfence.bits.rs2 := s1_req.size(1) tlb.io.sfence.bits.asid := io.cpu.s1_data.data tlb.io.sfence.bits.addr := s1_req.addr tlb.io.sfence.bits.hv := s1_req.cmd === M_HFENCEV tlb.io.sfence.bits.hg := s1_req.cmd === M_HFENCEG io.tlb_port.req.ready := clock_en_reg io.tlb_port.s1_resp := tlb.io.resp when (s1_tlb_req_valid && s1_valid && !(s1_req.phys && s1_req.no_xcpt)) { s1_nack := true.B } pma_checker.io <> DontCare pma_checker.io.req.bits.passthrough := true.B pma_checker.io.req.bits.vaddr := s1_req.addr pma_checker.io.req.bits.size := s1_req.size pma_checker.io.req.bits.cmd := s1_req.cmd pma_checker.io.req.bits.prv := s1_req.dprv pma_checker.io.req.bits.v := s1_req.dv val s1_paddr = Cat(Mux(s1_tlb_req_valid, s1_req.addr(paddrBits-1, pgIdxBits), tlb.io.resp.paddr >> pgIdxBits), s1_req.addr(pgIdxBits-1, 0)) val s1_victim_way = Wire(UInt()) val (s1_hit_way, s1_hit_state, s1_meta) = if (usingDataScratchpad) { val baseAddr = p(LookupByHartId)(_.dcache.flatMap(_.scratch.map(_.U)), io_hartid.get) | io_mmio_address_prefix.get val inScratchpad = s1_paddr >= baseAddr && s1_paddr < baseAddr + (nSets * cacheBlockBytes).U val hitState = Mux(inScratchpad, ClientMetadata.maximum, ClientMetadata.onReset) val dummyMeta = L1Metadata(0.U, ClientMetadata.onReset) (inScratchpad, hitState, Seq(tECC.encode(dummyMeta.asUInt))) } else { val metaReq = metaArb.io.out val metaIdx = metaReq.bits.idx when (metaReq.valid && metaReq.bits.write) { val wmask = if (nWays == 1) Seq(true.B) else 
metaReq.bits.way_en.asBools tag_array.write(metaIdx, VecInit(Seq.fill(nWays)(metaReq.bits.data)), wmask) } val s1_meta = tag_array.read(metaIdx, metaReq.valid && !metaReq.bits.write) val s1_meta_uncorrected = s1_meta.map(tECC.decode(_).uncorrected.asTypeOf(new L1Metadata)) val s1_tag = s1_paddr >> tagLSB val s1_meta_hit_way = s1_meta_uncorrected.map(r => r.coh.isValid() && r.tag === s1_tag).asUInt val s1_meta_hit_state = ( s1_meta_uncorrected.map(r => Mux(r.tag === s1_tag && !s1_flush_valid, r.coh.asUInt, 0.U)) .reduce (_|_)).asTypeOf(chiselTypeOf(ClientMetadata.onReset)) (s1_meta_hit_way, s1_meta_hit_state, s1_meta) } val s1_data_way = WireDefault(if (nWays == 1) 1.U else Mux(inWriteback, releaseWay, s1_hit_way)) val tl_d_data_encoded = Wire(chiselTypeOf(encodeData(tl_out.d.bits.data, false.B))) val s1_all_data_ways = VecInit(data.io.resp ++ (!cacheParams.separateUncachedResp).option(tl_d_data_encoded)) val s1_mask_xwr = new StoreGen(s1_req.size, s1_req.addr, 0.U, wordBytes).mask val s1_mask = Mux(s1_req.cmd === M_PWR, io.cpu.s1_data.mask, s1_mask_xwr) // for partial writes, s1_data.mask must be a subset of s1_mask_xwr assert(!(s1_valid_masked && s1_req.cmd === M_PWR) || (s1_mask_xwr | ~io.cpu.s1_data.mask).andR) val s2_valid = RegNext(s1_valid_masked && !s1_sfence, init=false.B) val s2_valid_no_xcpt = s2_valid && !io.cpu.s2_xcpt.asUInt.orR val s2_probe = RegNext(s1_probe, init=false.B) val releaseInFlight = s1_probe || s2_probe || release_state =/= s_ready val s2_not_nacked_in_s1 = RegNext(!s1_nack) val s2_valid_not_nacked_in_s1 = s2_valid && s2_not_nacked_in_s1 val s2_valid_masked = s2_valid_no_xcpt && s2_not_nacked_in_s1 val s2_valid_not_killed = s2_valid_masked && !io.cpu.s2_kill val s2_req = Reg(chiselTypeOf(io.cpu.req.bits)) val s2_cmd_flush_all = s2_req.cmd === M_FLUSH_ALL && !s2_req.size(0) val s2_cmd_flush_line = s2_req.cmd === M_FLUSH_ALL && s2_req.size(0) val s2_tlb_xcpt = Reg(chiselTypeOf(tlb.io.resp)) val s2_pma = Reg(chiselTypeOf(tlb.io.resp)) val s2_uncached_resp_addr = Reg(chiselTypeOf(s2_req.addr)) // should be DCE'd in synthesis when (s1_valid_not_nacked || s1_flush_valid) { s2_req := s1_req s2_req.addr := s1_paddr s2_tlb_xcpt := tlb.io.resp s2_pma := Mux(s1_tlb_req_valid, pma_checker.io.resp, tlb.io.resp) } val s2_vaddr = Cat(RegEnable(s1_vaddr, s1_valid_not_nacked || s1_flush_valid) >> tagLSB, s2_req.addr(tagLSB-1, 0)) val s2_read = isRead(s2_req.cmd) val s2_write = isWrite(s2_req.cmd) val s2_readwrite = s2_read || s2_write val s2_flush_valid_pre_tag_ecc = RegNext(s1_flush_valid) val s1_meta_decoded = s1_meta.map(tECC.decode(_)) val s1_meta_clk_en = s1_valid_not_nacked || s1_flush_valid || s1_probe val s2_meta_correctable_errors = s1_meta_decoded.map(m => RegEnable(m.correctable, s1_meta_clk_en)).asUInt val s2_meta_uncorrectable_errors = s1_meta_decoded.map(m => RegEnable(m.uncorrectable, s1_meta_clk_en)).asUInt val s2_meta_error_uncorrectable = s2_meta_uncorrectable_errors.orR val s2_meta_corrected = s1_meta_decoded.map(m => RegEnable(m.corrected, s1_meta_clk_en).asTypeOf(new L1Metadata)) val s2_meta_error = (s2_meta_uncorrectable_errors | s2_meta_correctable_errors).orR val s2_flush_valid = s2_flush_valid_pre_tag_ecc && !s2_meta_error val s2_data = { val wordsPerRow = rowBits / subWordBits val en = s1_valid || inWriteback || io.cpu.replay_next val word_en = Mux(inWriteback, Fill(wordsPerRow, 1.U), Mux(s1_did_read, s1_read_mask, 0.U)) val s1_way_words = s1_all_data_ways.map(_.grouped(dECC.width(eccBits) * (subWordBits / eccBits))) if (cacheParams.pipelineWayMux) { val 
s1_word_en = Mux(io.cpu.replay_next, 0.U, word_en) (for (i <- 0 until wordsPerRow) yield { val s2_way_en = RegEnable(Mux(s1_word_en(i), s1_data_way, 0.U), en) val s2_way_words = (0 until nWays).map(j => RegEnable(s1_way_words(j)(i), en && word_en(i))) (0 until nWays).map(j => Mux(s2_way_en(j), s2_way_words(j), 0.U)).reduce(_|_) }).asUInt } else { val s1_word_en = Mux(!io.cpu.replay_next, word_en, UIntToOH(uncachedResp.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes)), wordsPerRow)) (for (i <- 0 until wordsPerRow) yield { RegEnable(Mux1H(Mux(s1_word_en(i), s1_data_way, 0.U), s1_way_words.map(_(i))), en) }).asUInt } } val s2_probe_way = RegEnable(s1_hit_way, s1_probe) val s2_probe_state = RegEnable(s1_hit_state, s1_probe) val s2_hit_way = RegEnable(s1_hit_way, s1_valid_not_nacked) val s2_hit_state = RegEnable(s1_hit_state, s1_valid_not_nacked || s1_flush_valid) val s2_waw_hazard = RegEnable(s1_waw_hazard, s1_valid_not_nacked) val s2_store_merge = Wire(Bool()) val s2_hit_valid = s2_hit_state.isValid() val (s2_hit, s2_grow_param, s2_new_hit_state) = s2_hit_state.onAccess(s2_req.cmd) val s2_data_decoded = decodeData(s2_data) val s2_word_idx = s2_req.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes)) val s2_data_error = s2_data_decoded.map(_.error).orR val s2_data_error_uncorrectable = s2_data_decoded.map(_.uncorrectable).orR val s2_data_corrected = (s2_data_decoded.map(_.corrected): Seq[UInt]).asUInt val s2_data_uncorrected = (s2_data_decoded.map(_.uncorrected): Seq[UInt]).asUInt val s2_valid_hit_maybe_flush_pre_data_ecc_and_waw = s2_valid_masked && !s2_meta_error && s2_hit val s2_no_alloc_hazard = if (!usingVM || pgIdxBits >= untagBits) false.B else { // make sure that any in-flight non-allocating accesses are ordered before // any allocating accesses. this can only happen if aliasing is possible. 
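    // Worked example (hypothetical parameters): with 4 KiB pages, pgIdxBits = 12. A 64 KiB, 4-way
    // cache with 64-byte lines has 256 sets, so untagBits = 8 + 6 = 14 > 12 and two virtual
    // addresses of the same physical line can index different sets; the ordering check below is
    // then required. A 32 KiB, 8-way configuration has untagBits = 12, and the guard above folds
    // the whole check to false.B.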
val any_no_alloc_in_flight = Reg(Bool()) when (!uncachedInFlight.asUInt.orR) { any_no_alloc_in_flight := false.B } when (s2_valid && s2_req.no_alloc) { any_no_alloc_in_flight := true.B } val s1_need_check = any_no_alloc_in_flight || s2_valid && s2_req.no_alloc val concerns = (uncachedInFlight zip uncachedReqs) :+ (s2_valid && s2_req.no_alloc, s2_req) val s1_uncached_hits = concerns.map { c => val concern_wmask = new StoreGen(c._2.size, c._2.addr, 0.U, wordBytes).mask val addr_match = (c._2.addr ^ s1_paddr)(pgIdxBits+pgLevelBits-1, wordBytes.log2) === 0.U val mask_match = (concern_wmask & s1_mask_xwr).orR || c._2.cmd === M_PWR || s1_req.cmd === M_PWR val cmd_match = isWrite(c._2.cmd) || isWrite(s1_req.cmd) c._1 && s1_need_check && cmd_match && addr_match && mask_match } val s2_uncached_hits = RegEnable(s1_uncached_hits.asUInt, s1_valid_not_nacked) s2_uncached_hits.orR } val s2_valid_hit_pre_data_ecc_and_waw = s2_valid_hit_maybe_flush_pre_data_ecc_and_waw && s2_readwrite && !s2_no_alloc_hazard val s2_valid_flush_line = s2_valid_hit_maybe_flush_pre_data_ecc_and_waw && s2_cmd_flush_line val s2_valid_hit_pre_data_ecc = s2_valid_hit_pre_data_ecc_and_waw && (!s2_waw_hazard || s2_store_merge) val s2_valid_data_error = s2_valid_hit_pre_data_ecc_and_waw && s2_data_error val s2_valid_hit = s2_valid_hit_pre_data_ecc && !s2_data_error val s2_valid_miss = s2_valid_masked && s2_readwrite && !s2_meta_error && !s2_hit val s2_uncached = !s2_pma.cacheable || s2_req.no_alloc && !s2_pma.must_alloc && !s2_hit_valid val s2_valid_cached_miss = s2_valid_miss && !s2_uncached && !uncachedInFlight.asUInt.orR dontTouch(s2_valid_cached_miss) val s2_want_victimize = (!usingDataScratchpad).B && (s2_valid_cached_miss || s2_valid_flush_line || s2_valid_data_error || s2_flush_valid) val s2_cannot_victimize = !s2_flush_valid && io.cpu.s2_kill val s2_victimize = s2_want_victimize && !s2_cannot_victimize val s2_valid_uncached_pending = s2_valid_miss && s2_uncached && !uncachedInFlight.asUInt.andR val s2_victim_way = UIntToOH(RegEnable(s1_victim_way, s1_valid_not_nacked || s1_flush_valid)) val s2_victim_or_hit_way = Mux(s2_hit_valid, s2_hit_way, s2_victim_way) val s2_victim_tag = Mux(s2_valid_data_error || s2_valid_flush_line, s2_req.addr(paddrBits-1, tagLSB), Mux1H(s2_victim_way, s2_meta_corrected).tag) val s2_victim_state = Mux(s2_hit_valid, s2_hit_state, Mux1H(s2_victim_way, s2_meta_corrected).coh) val (s2_prb_ack_data, s2_report_param, probeNewCoh)= s2_probe_state.onProbe(probe_bits.param) val (s2_victim_dirty, s2_shrink_param, voluntaryNewCoh) = s2_victim_state.onCacheControl(M_FLUSH) dontTouch(s2_victim_dirty) val s2_update_meta = s2_hit_state =/= s2_new_hit_state val s2_dont_nack_uncached = s2_valid_uncached_pending && tl_out_a.ready val s2_dont_nack_misc = s2_valid_masked && !s2_meta_error && (supports_flush.B && s2_cmd_flush_all && flushed && !flushing || supports_flush.B && s2_cmd_flush_line && !s2_hit || s2_req.cmd === M_WOK) io.cpu.s2_nack := s2_valid_no_xcpt && !s2_dont_nack_uncached && !s2_dont_nack_misc && !s2_valid_hit when (io.cpu.s2_nack || (s2_valid_hit_pre_data_ecc_and_waw && s2_update_meta)) { s1_nack := true.B } // tag updates on ECC errors val s2_first_meta_corrected = PriorityMux(s2_meta_correctable_errors, s2_meta_corrected) metaArb.io.in(1).valid := s2_meta_error && (s2_valid_masked || s2_flush_valid_pre_tag_ecc || s2_probe) metaArb.io.in(1).bits.write := true.B metaArb.io.in(1).bits.way_en := s2_meta_uncorrectable_errors | Mux(s2_meta_error_uncorrectable, 0.U, 
PriorityEncoderOH(s2_meta_correctable_errors)) metaArb.io.in(1).bits.idx := Mux(s2_probe, probeIdx(probe_bits), s2_vaddr(idxMSB, idxLSB)) metaArb.io.in(1).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, metaArb.io.in(1).bits.idx << blockOffBits) metaArb.io.in(1).bits.data := tECC.encode { val new_meta = WireDefault(s2_first_meta_corrected) when (s2_meta_error_uncorrectable) { new_meta.coh := ClientMetadata.onReset } new_meta.asUInt } // tag updates on hit metaArb.io.in(2).valid := s2_valid_hit_pre_data_ecc_and_waw && s2_update_meta metaArb.io.in(2).bits.write := !io.cpu.s2_kill metaArb.io.in(2).bits.way_en := s2_victim_or_hit_way metaArb.io.in(2).bits.idx := s2_vaddr(idxMSB, idxLSB) metaArb.io.in(2).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, s2_vaddr(idxMSB, 0)) metaArb.io.in(2).bits.data := tECC.encode(L1Metadata(s2_req.addr >> tagLSB, s2_new_hit_state).asUInt) // load reservations and TL error reporting val s2_lr = (usingAtomics && !usingDataScratchpad).B && s2_req.cmd === M_XLR val s2_sc = (usingAtomics && !usingDataScratchpad).B && s2_req.cmd === M_XSC val lrscCount = RegInit(0.U) val lrscValid = lrscCount > lrscBackoff.U val lrscBackingOff = lrscCount > 0.U && !lrscValid val lrscAddr = Reg(UInt()) val lrscAddrMatch = lrscAddr === (s2_req.addr >> blockOffBits) val s2_sc_fail = s2_sc && !(lrscValid && lrscAddrMatch) when ((s2_valid_hit && s2_lr && !cached_grant_wait || s2_valid_cached_miss) && !io.cpu.s2_kill) { lrscCount := Mux(s2_hit, (lrscCycles - 1).U, 0.U) lrscAddr := s2_req.addr >> blockOffBits } when (lrscCount > 0.U) { lrscCount := lrscCount - 1.U } when (s2_valid_not_killed && lrscValid) { lrscCount := lrscBackoff.U } when (s1_probe) { lrscCount := 0.U } // don't perform data correction if it might clobber a recent store val s2_correct = s2_data_error && !any_pstore_valid && !RegNext(any_pstore_valid || s2_valid) && usingDataScratchpad.B // pending store buffer val s2_valid_correct = s2_valid_hit_pre_data_ecc_and_waw && s2_correct && !io.cpu.s2_kill def s2_store_valid_pre_kill = s2_valid_hit && s2_write && !s2_sc_fail def s2_store_valid = s2_store_valid_pre_kill && !io.cpu.s2_kill val pstore1_cmd = RegEnable(s1_req.cmd, s1_valid_not_nacked && s1_write) val pstore1_addr = RegEnable(s1_vaddr, s1_valid_not_nacked && s1_write) val pstore1_data = RegEnable(io.cpu.s1_data.data, s1_valid_not_nacked && s1_write) val pstore1_way = RegEnable(s1_hit_way, s1_valid_not_nacked && s1_write) val pstore1_mask = RegEnable(s1_mask, s1_valid_not_nacked && s1_write) val pstore1_storegen_data = WireDefault(pstore1_data) val pstore1_rmw = usingRMW.B && RegEnable(needsRead(s1_req), s1_valid_not_nacked && s1_write) val pstore1_merge_likely = s2_valid_not_nacked_in_s1 && s2_write && s2_store_merge val pstore1_merge = s2_store_valid && s2_store_merge val pstore2_valid = RegInit(false.B) val pstore_drain_opportunistic = !(io.cpu.req.valid && likelyNeedsRead(io.cpu.req.bits)) && !(s1_valid && s1_waw_hazard) val pstore_drain_on_miss = releaseInFlight || RegNext(io.cpu.s2_nack) val pstore1_held = RegInit(false.B) val pstore1_valid_likely = s2_valid && s2_write || pstore1_held def pstore1_valid_not_rmw(s2_kill: Bool) = s2_valid_hit_pre_data_ecc && s2_write && !s2_kill || pstore1_held val pstore1_valid = s2_store_valid || pstore1_held any_pstore_valid := pstore1_held || pstore2_valid val pstore_drain_structural = pstore1_valid_likely && pstore2_valid && ((s1_valid && s1_write) || pstore1_rmw) assert(pstore1_rmw || pstore1_valid_not_rmw(io.cpu.s2_kill) === pstore1_valid) 
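    // Pending-store-buffer summary: pstore1 holds the store currently completing in s2 (possibly
    // as a read-modify-write for sub-ECC-granule writes or AMOs); pstore2 holds a store draining
    // into the data array. Drains happen opportunistically when the incoming request does not need
    // the data-array read port and there is no s1 write-after-write hazard
    // (pstore_drain_opportunistic), are forced while a release is in flight or right after a nack
    // (pstore_drain_on_miss), and on the structural hazard case below (pstore_drain_structural).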
ccover(pstore_drain_structural, "STORE_STRUCTURAL_HAZARD", "D$ read-modify-write structural hazard") ccover(pstore1_valid && pstore_drain_on_miss, "STORE_DRAIN_ON_MISS", "D$ store buffer drain on miss") ccover(s1_valid_not_nacked && s1_waw_hazard, "WAW_HAZARD", "D$ write-after-write hazard") def should_pstore_drain(truly: Bool) = { val s2_kill = truly && io.cpu.s2_kill !pstore1_merge_likely && (usingRMW.B && pstore_drain_structural || (((pstore1_valid_not_rmw(s2_kill) && !pstore1_rmw) || pstore2_valid) && (pstore_drain_opportunistic || pstore_drain_on_miss))) } val pstore_drain = should_pstore_drain(true.B) pstore1_held := (s2_store_valid && !s2_store_merge || pstore1_held) && pstore2_valid && !pstore_drain val advance_pstore1 = (pstore1_valid || s2_valid_correct) && (pstore2_valid === pstore_drain) pstore2_valid := pstore2_valid && !pstore_drain || advance_pstore1 val pstore2_addr = RegEnable(Mux(s2_correct, s2_vaddr, pstore1_addr), advance_pstore1) val pstore2_way = RegEnable(Mux(s2_correct, s2_hit_way, pstore1_way), advance_pstore1) val pstore2_storegen_data = { for (i <- 0 until wordBytes) yield RegEnable(pstore1_storegen_data(8*(i+1)-1, 8*i), advance_pstore1 || pstore1_merge && pstore1_mask(i)) }.asUInt val pstore2_storegen_mask = { val mask = Reg(UInt(wordBytes.W)) when (advance_pstore1 || pstore1_merge) { val mergedMask = pstore1_mask | Mux(pstore1_merge, mask, 0.U) mask := ~Mux(s2_correct, 0.U, ~mergedMask) } mask } s2_store_merge := (if (eccBytes == 1) false.B else { ccover(pstore1_merge, "STORE_MERGED", "D$ store merged") // only merge stores to ECC granules that are already stored-to, to avoid // WAW hazards val wordMatch = (eccMask(pstore2_storegen_mask) | ~eccMask(pstore1_mask)).andR val idxMatch = s2_vaddr(untagBits-1, log2Ceil(wordBytes)) === pstore2_addr(untagBits-1, log2Ceil(wordBytes)) val tagMatch = (s2_hit_way & pstore2_way).orR pstore2_valid && wordMatch && idxMatch && tagMatch }) dataArb.io.in(0).valid := should_pstore_drain(false.B) dataArb.io.in(0).bits.write := pstore_drain dataArb.io.in(0).bits.addr := Mux(pstore2_valid, pstore2_addr, pstore1_addr) dataArb.io.in(0).bits.way_en := Mux(pstore2_valid, pstore2_way, pstore1_way) dataArb.io.in(0).bits.wdata := encodeData(Fill(rowWords, Mux(pstore2_valid, pstore2_storegen_data, pstore1_data)), false.B) dataArb.io.in(0).bits.wordMask := { val eccMask = dataArb.io.in(0).bits.eccMask.asBools.grouped(subWordBytes/eccBytes).map(_.orR).toSeq.asUInt val wordMask = UIntToOH(Mux(pstore2_valid, pstore2_addr, pstore1_addr).extract(rowOffBits-1, wordBytes.log2)) FillInterleaved(wordBytes/subWordBytes, wordMask) & Fill(rowBytes/wordBytes, eccMask) } dataArb.io.in(0).bits.eccMask := eccMask(Mux(pstore2_valid, pstore2_storegen_mask, pstore1_mask)) // store->load RAW hazard detection def s1Depends(addr: UInt, mask: UInt) = addr(idxMSB, wordOffBits) === s1_vaddr(idxMSB, wordOffBits) && Mux(s1_write, (eccByteMask(mask) & eccByteMask(s1_mask_xwr)).orR, (mask & s1_mask_xwr).orR) val s1_hazard = (pstore1_valid_likely && s1Depends(pstore1_addr, pstore1_mask)) || (pstore2_valid && s1Depends(pstore2_addr, pstore2_storegen_mask)) val s1_raw_hazard = s1_read && s1_hazard s1_waw_hazard := (if (eccBytes == 1) false.B else { ccover(s1_valid_not_nacked && s1_waw_hazard, "WAW_HAZARD", "D$ write-after-write hazard") s1_write && (s1_hazard || needsRead(s1_req) && !s1_did_read) }) when (s1_valid && s1_raw_hazard) { s1_nack := true.B } // performance hints to processor io.cpu.s2_nack_cause_raw := RegNext(s1_raw_hazard) || !(!s2_waw_hazard || 
s2_store_merge) // Prepare a TileLink request message that initiates a transaction val a_source = PriorityEncoder(~uncachedInFlight.asUInt << mmioOffset) // skip the MSHR val acquire_address = (s2_req.addr >> idxLSB) << idxLSB val access_address = s2_req.addr val a_size = s2_req.size val a_data = Fill(beatWords, pstore1_data) val a_mask = pstore1_mask << (access_address.extract(beatBytes.log2-1, wordBytes.log2) << 3) val get = edge.Get(a_source, access_address, a_size)._2 val put = edge.Put(a_source, access_address, a_size, a_data)._2 val putpartial = edge.Put(a_source, access_address, a_size, a_data, a_mask)._2 val atomics = if (edge.manager.anySupportLogical) { MuxLookup(s2_req.cmd, WireDefault(0.U.asTypeOf(new TLBundleA(edge.bundle))))(Array( M_XA_SWAP -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.SWAP)._2, M_XA_XOR -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.XOR) ._2, M_XA_OR -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.OR) ._2, M_XA_AND -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.AND) ._2, M_XA_ADD -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.ADD)._2, M_XA_MIN -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MIN)._2, M_XA_MAX -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MAX)._2, M_XA_MINU -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MINU)._2, M_XA_MAXU -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MAXU)._2)) } else { // If no managers support atomics, assert fail if processor asks for them assert (!(tl_out_a.valid && s2_read && s2_write && s2_uncached)) WireDefault(new TLBundleA(edge.bundle), DontCare) } tl_out_a.valid := !io.cpu.s2_kill && (s2_valid_uncached_pending || (s2_valid_cached_miss && !(release_ack_wait && (s2_req.addr ^ release_ack_addr)(((pgIdxBits + pgLevelBits) min paddrBits) - 1, idxLSB) === 0.U) && (cacheParams.acquireBeforeRelease.B && !release_ack_wait && release_queue_empty || !s2_victim_dirty))) tl_out_a.bits := Mux(!s2_uncached, acquire(s2_vaddr, s2_req.addr, s2_grow_param), Mux(!s2_write, get, Mux(s2_req.cmd === M_PWR, putpartial, Mux(!s2_read, put, atomics)))) // Drive APROT Bits tl_out_a.bits.user.lift(AMBAProt).foreach { x => val user_bit_cacheable = s2_pma.cacheable x.privileged := s2_req.dprv === PRV.M.U || user_bit_cacheable // if the address is cacheable, enable outer caches x.bufferable := user_bit_cacheable x.modifiable := user_bit_cacheable x.readalloc := user_bit_cacheable x.writealloc := user_bit_cacheable // Following are always tied off x.fetch := false.B x.secure := true.B } // Set pending bits for outstanding TileLink transaction val a_sel = UIntToOH(a_source, maxUncachedInFlight+mmioOffset) >> mmioOffset when (tl_out_a.fire) { when (s2_uncached) { (a_sel.asBools zip (uncachedInFlight zip uncachedReqs)) foreach { case (s, (f, r)) => when (s) { f := true.B r := s2_req r.cmd := Mux(s2_write, Mux(s2_req.cmd === M_PWR, M_PWR, M_XWR), M_XRD) } } }.otherwise { cached_grant_wait := true.B refill_way := s2_victim_or_hit_way } } // grant val (d_first, d_last, d_done, d_address_inc) = edge.addr_inc(tl_out.d) val (d_opc, grantIsUncached, grantIsUncachedData) = { val uncachedGrantOpcodesSansData = Seq(AccessAck, HintAck) val uncachedGrantOpcodesWithData = Seq(AccessAckData) val uncachedGrantOpcodes = uncachedGrantOpcodesWithData ++ uncachedGrantOpcodesSansData val whole_opc = tl_out.d.bits.opcode if (usingDataScratchpad) { assert(!tl_out.d.valid 
|| whole_opc.isOneOf(uncachedGrantOpcodes)) // the only valid TL-D messages are uncached, so we can do some pruning val opc = whole_opc(uncachedGrantOpcodes.map(_.getWidth).max - 1, 0) val data = DecodeLogic(opc, uncachedGrantOpcodesWithData, uncachedGrantOpcodesSansData) (opc, true.B, data) } else { (whole_opc, whole_opc.isOneOf(uncachedGrantOpcodes), whole_opc.isOneOf(uncachedGrantOpcodesWithData)) } } tl_d_data_encoded := encodeData(tl_out.d.bits.data, tl_out.d.bits.corrupt && !io.ptw.customCSRs.suppressCorruptOnGrantData && !grantIsUncached) val grantIsCached = d_opc.isOneOf(Grant, GrantData) val grantIsVoluntary = d_opc === ReleaseAck // Clears a different pending bit val grantIsRefill = d_opc === GrantData // Writes the data array val grantInProgress = RegInit(false.B) val blockProbeAfterGrantCount = RegInit(0.U) when (blockProbeAfterGrantCount > 0.U) { blockProbeAfterGrantCount := blockProbeAfterGrantCount - 1.U } val canAcceptCachedGrant = !release_state.isOneOf(s_voluntary_writeback, s_voluntary_write_meta, s_voluntary_release) tl_out.d.ready := Mux(grantIsCached, (!d_first || tl_out.e.ready) && canAcceptCachedGrant, true.B) val uncachedRespIdxOH = UIntToOH(tl_out.d.bits.source, maxUncachedInFlight+mmioOffset) >> mmioOffset uncachedResp := Mux1H(uncachedRespIdxOH, uncachedReqs) when (tl_out.d.fire) { when (grantIsCached) { grantInProgress := true.B assert(cached_grant_wait, "A GrantData was unexpected by the dcache.") when(d_last) { cached_grant_wait := false.B grantInProgress := false.B blockProbeAfterGrantCount := (blockProbeAfterGrantCycles - 1).U replacer.miss } } .elsewhen (grantIsUncached) { (uncachedRespIdxOH.asBools zip uncachedInFlight) foreach { case (s, f) => when (s && d_last) { assert(f, "An AccessAck was unexpected by the dcache.") // TODO must handle Ack coming back on same cycle! f := false.B } } when (grantIsUncachedData) { if (!cacheParams.separateUncachedResp) { if (!cacheParams.pipelineWayMux) s1_data_way := 1.U << nWays s2_req.cmd := M_XRD s2_req.size := uncachedResp.size s2_req.signed := uncachedResp.signed s2_req.tag := uncachedResp.tag s2_req.addr := { require(rowOffBits >= beatOffBits) val dontCareBits = s1_paddr >> rowOffBits << rowOffBits dontCareBits | uncachedResp.addr(beatOffBits-1, 0) } s2_uncached_resp_addr := uncachedResp.addr } } } .elsewhen (grantIsVoluntary) { assert(release_ack_wait, "A ReleaseAck was unexpected by the dcache.") // TODO should handle Ack coming back on same cycle! 
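      // A ReleaseAck closes out the voluntary writeback handshake: release_ack_wait was set when
      // the first beat of the voluntary Release left on the C channel, and while it is pending the
      // cache holds off new Acquires and incoming Probes whose addresses match the outstanding
      // release (see the tl_out_a.valid and block_probe_for_pending_release_ack terms). Clearing
      // it here re-enables both.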
release_ack_wait := false.B } } // Finish TileLink transaction by issuing a GrantAck tl_out.e.valid := tl_out.d.valid && d_first && grantIsCached && canAcceptCachedGrant tl_out.e.bits := edge.GrantAck(tl_out.d.bits) assert(tl_out.e.fire === (tl_out.d.fire && d_first && grantIsCached)) // data refill // note this ready-valid signaling ignores E-channel backpressure, which // benignly means the data RAM might occasionally be redundantly written dataArb.io.in(1).valid := tl_out.d.valid && grantIsRefill && canAcceptCachedGrant when (grantIsRefill && !dataArb.io.in(1).ready) { tl_out.e.valid := false.B tl_out.d.ready := false.B } if (!usingDataScratchpad) { dataArb.io.in(1).bits.write := true.B dataArb.io.in(1).bits.addr := (s2_vaddr >> idxLSB) << idxLSB | d_address_inc dataArb.io.in(1).bits.way_en := refill_way dataArb.io.in(1).bits.wdata := tl_d_data_encoded dataArb.io.in(1).bits.wordMask := ~0.U((rowBytes / subWordBytes).W) dataArb.io.in(1).bits.eccMask := ~0.U((wordBytes / eccBytes).W) } else { dataArb.io.in(1).bits := dataArb.io.in(0).bits } // tag updates on refill // ignore backpressure from metaArb, which can only be caused by tag ECC // errors on hit-under-miss. failing to write the new tag will leave the // line invalid, so we'll simply request the line again later. metaArb.io.in(3).valid := grantIsCached && d_done && !tl_out.d.bits.denied metaArb.io.in(3).bits.write := true.B metaArb.io.in(3).bits.way_en := refill_way metaArb.io.in(3).bits.idx := s2_vaddr(idxMSB, idxLSB) metaArb.io.in(3).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, s2_vaddr(idxMSB, 0)) metaArb.io.in(3).bits.data := tECC.encode(L1Metadata(s2_req.addr >> tagLSB, s2_hit_state.onGrant(s2_req.cmd, tl_out.d.bits.param)).asUInt) if (!cacheParams.separateUncachedResp) { // don't accept uncached grants if there's a structural hazard on s2_data... 
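    // Mechanism summary: if an uncached AccessAckData arrives while s2_data could be clobbered
    // (the data array was accessed last cycle, or a new request is in s1), d.ready is held low;
    // to guarantee the grant eventually drains, a dummy read (write := false) is forced into
    // dataArb.io.in(1), which stalls io.cpu.req and opens a bubble in the hit pipeline.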
val blockUncachedGrant = Reg(Bool()) blockUncachedGrant := dataArb.io.out.valid when (grantIsUncachedData && (blockUncachedGrant || s1_valid)) { tl_out.d.ready := false.B // ...but insert bubble to guarantee grant's eventual forward progress when (tl_out.d.valid) { io.cpu.req.ready := false.B dataArb.io.in(1).valid := true.B dataArb.io.in(1).bits.write := false.B blockUncachedGrant := !dataArb.io.in(1).ready } } } ccover(tl_out.d.valid && !tl_out.d.ready, "BLOCK_D", "D$ D-channel blocked") // Handle an incoming TileLink Probe message val block_probe_for_core_progress = blockProbeAfterGrantCount > 0.U || lrscValid val block_probe_for_pending_release_ack = release_ack_wait && (tl_out.b.bits.address ^ release_ack_addr)(((pgIdxBits + pgLevelBits) min paddrBits) - 1, idxLSB) === 0.U val block_probe_for_ordering = releaseInFlight || block_probe_for_pending_release_ack || grantInProgress metaArb.io.in(6).valid := tl_out.b.valid && (!block_probe_for_core_progress || lrscBackingOff) tl_out.b.ready := metaArb.io.in(6).ready && !(block_probe_for_core_progress || block_probe_for_ordering || s1_valid || s2_valid) metaArb.io.in(6).bits.write := false.B metaArb.io.in(6).bits.idx := probeIdx(tl_out.b.bits) metaArb.io.in(6).bits.addr := Cat(io.cpu.req.bits.addr >> paddrBits, tl_out.b.bits.address) metaArb.io.in(6).bits.way_en := metaArb.io.in(4).bits.way_en metaArb.io.in(6).bits.data := metaArb.io.in(4).bits.data // replacement policy s1_victim_way := (if (replacer.perSet && nWays > 1) { val repl_array = Mem(nSets, UInt(replacer.nBits.W)) val s1_repl_idx = s1_req.addr(idxBits+blockOffBits-1, blockOffBits) val s2_repl_idx = s2_vaddr(idxBits+blockOffBits-1, blockOffBits) val s2_repl_state = Reg(UInt(replacer.nBits.W)) val s2_new_repl_state = replacer.get_next_state(s2_repl_state, OHToUInt(s2_hit_way)) val s2_repl_wen = s2_valid_masked && s2_hit_way.orR && s2_repl_state =/= s2_new_repl_state val s1_repl_state = Mux(s2_repl_wen && s2_repl_idx === s1_repl_idx, s2_new_repl_state, repl_array(s1_repl_idx)) when (s1_valid_not_nacked) { s2_repl_state := s1_repl_state } val waddr = Mux(resetting, flushCounter(idxBits-1, 0), s2_repl_idx) val wdata = Mux(resetting, 0.U, s2_new_repl_state) val wen = resetting || s2_repl_wen when (wen) { repl_array(waddr) := wdata } replacer.get_replace_way(s1_repl_state) } else { replacer.way }) // release val (c_first, c_last, releaseDone, c_count) = edge.count(tl_out_c) val releaseRejected = Wire(Bool()) val s1_release_data_valid = RegNext(dataArb.io.in(2).fire) val s2_release_data_valid = RegNext(s1_release_data_valid && !releaseRejected) releaseRejected := s2_release_data_valid && !tl_out_c.fire val releaseDataBeat = Cat(0.U, c_count) + Mux(releaseRejected, 0.U, s1_release_data_valid + Cat(0.U, s2_release_data_valid)) val nackResponseMessage = edge.ProbeAck(b = probe_bits, reportPermissions = TLPermissions.NtoN) val cleanReleaseMessage = edge.ProbeAck(b = probe_bits, reportPermissions = s2_report_param) val dirtyReleaseMessage = edge.ProbeAck(b = probe_bits, reportPermissions = s2_report_param, data = 0.U) tl_out_c.valid := (s2_release_data_valid || (!cacheParams.silentDrop.B && release_state === s_voluntary_release)) && !(c_first && release_ack_wait) tl_out_c.bits := nackResponseMessage val newCoh = WireDefault(probeNewCoh) releaseWay := s2_probe_way if (!usingDataScratchpad) { when (s2_victimize) { assert(s2_valid_flush_line || s2_flush_valid || io.cpu.s2_nack) val discard_line = s2_valid_flush_line && s2_req.size(1) || s2_flush_valid && flushing_req.size(1) release_state := 
Mux(s2_victim_dirty && !discard_line, s_voluntary_writeback, Mux(!cacheParams.silentDrop.B && !release_ack_wait && release_queue_empty && s2_victim_state.isValid() && (s2_valid_flush_line || s2_flush_valid || s2_readwrite && !s2_hit_valid), s_voluntary_release, s_voluntary_write_meta)) probe_bits := addressToProbe(s2_vaddr, Cat(s2_victim_tag, s2_req.addr(tagLSB-1, idxLSB)) << idxLSB) } when (s2_probe) { val probeNack = WireDefault(true.B) when (s2_meta_error) { release_state := s_probe_retry }.elsewhen (s2_prb_ack_data) { release_state := s_probe_rep_dirty }.elsewhen (s2_probe_state.isValid()) { tl_out_c.valid := true.B tl_out_c.bits := cleanReleaseMessage release_state := Mux(releaseDone, s_probe_write_meta, s_probe_rep_clean) }.otherwise { tl_out_c.valid := true.B probeNack := !releaseDone release_state := Mux(releaseDone, s_ready, s_probe_rep_miss) } when (probeNack) { s1_nack := true.B } } when (release_state === s_probe_retry) { metaArb.io.in(6).valid := true.B metaArb.io.in(6).bits.idx := probeIdx(probe_bits) metaArb.io.in(6).bits.addr := Cat(io.cpu.req.bits.addr >> paddrBits, probe_bits.address) when (metaArb.io.in(6).ready) { release_state := s_ready s1_probe := true.B } } when (release_state === s_probe_rep_miss) { tl_out_c.valid := true.B when (releaseDone) { release_state := s_ready } } when (release_state === s_probe_rep_clean) { tl_out_c.valid := true.B tl_out_c.bits := cleanReleaseMessage when (releaseDone) { release_state := s_probe_write_meta } } when (release_state === s_probe_rep_dirty) { tl_out_c.bits := dirtyReleaseMessage when (releaseDone) { release_state := s_probe_write_meta } } when (release_state.isOneOf(s_voluntary_writeback, s_voluntary_write_meta, s_voluntary_release)) { when (release_state === s_voluntary_release) { tl_out_c.bits := edge.Release(fromSource = 0.U, toAddress = 0.U, lgSize = lgCacheBlockBytes.U, shrinkPermissions = s2_shrink_param)._2 }.otherwise { tl_out_c.bits := edge.Release(fromSource = 0.U, toAddress = 0.U, lgSize = lgCacheBlockBytes.U, shrinkPermissions = s2_shrink_param, data = 0.U)._2 } newCoh := voluntaryNewCoh releaseWay := s2_victim_or_hit_way when (releaseDone) { release_state := s_voluntary_write_meta } when (tl_out_c.fire && c_first) { release_ack_wait := true.B release_ack_addr := probe_bits.address } } tl_out_c.bits.source := probe_bits.source tl_out_c.bits.address := probe_bits.address tl_out_c.bits.data := s2_data_corrected tl_out_c.bits.corrupt := inWriteback && s2_data_error_uncorrectable } tl_out_c.bits.user.lift(AMBAProt).foreach { x => x.fetch := false.B x.secure := true.B x.privileged := true.B x.bufferable := true.B x.modifiable := true.B x.readalloc := true.B x.writealloc := true.B } dataArb.io.in(2).valid := inWriteback && releaseDataBeat < refillCycles.U dataArb.io.in(2).bits := dataArb.io.in(1).bits dataArb.io.in(2).bits.write := false.B dataArb.io.in(2).bits.addr := (probeIdx(probe_bits) << blockOffBits) | (releaseDataBeat(log2Up(refillCycles)-1,0) << rowOffBits) dataArb.io.in(2).bits.wordMask := ~0.U((rowBytes / subWordBytes).W) dataArb.io.in(2).bits.eccMask := ~0.U((wordBytes / eccBytes).W) dataArb.io.in(2).bits.way_en := ~0.U(nWays.W) metaArb.io.in(4).valid := release_state.isOneOf(s_voluntary_write_meta, s_probe_write_meta) metaArb.io.in(4).bits.write := true.B metaArb.io.in(4).bits.way_en := releaseWay metaArb.io.in(4).bits.idx := probeIdx(probe_bits) metaArb.io.in(4).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, probe_bits.address(idxMSB, 0)) metaArb.io.in(4).bits.data := 
tECC.encode(L1Metadata(tl_out_c.bits.address >> tagLSB, newCoh).asUInt) when (metaArb.io.in(4).fire) { release_state := s_ready } // cached response (io.cpu.resp.bits: Data).waiveAll :<>= (s2_req: Data).waiveAll io.cpu.resp.bits.has_data := s2_read io.cpu.resp.bits.replay := false.B io.cpu.s2_uncached := s2_uncached && !s2_hit io.cpu.s2_paddr := s2_req.addr io.cpu.s2_gpa := s2_tlb_xcpt.gpa io.cpu.s2_gpa_is_pte := s2_tlb_xcpt.gpa_is_pte // report whether there are any outstanding accesses. disregard any // slave-port accesses, since they don't affect local memory ordering. val s1_isSlavePortAccess = s1_req.no_xcpt val s2_isSlavePortAccess = s2_req.no_xcpt io.cpu.ordered := !(s1_valid && !s1_isSlavePortAccess || s2_valid && !s2_isSlavePortAccess || cached_grant_wait || uncachedInFlight.asUInt.orR) io.cpu.store_pending := (cached_grant_wait && isWrite(s2_req.cmd)) || uncachedInFlight.asUInt.orR val s1_xcpt_valid = tlb.io.req.valid && !s1_isSlavePortAccess && !s1_nack io.cpu.s2_xcpt := Mux(RegNext(s1_xcpt_valid), s2_tlb_xcpt, 0.U.asTypeOf(s2_tlb_xcpt)) if (usingDataScratchpad) { assert(!(s2_valid_masked && s2_req.cmd.isOneOf(M_XLR, M_XSC))) } else { ccover(tl_out.b.valid && !tl_out.b.ready, "BLOCK_B", "D$ B-channel blocked") } // uncached response val s1_uncached_data_word = { val word_idx = uncachedResp.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes)) val words = tl_out.d.bits.data.grouped(wordBits) words(word_idx) } val s2_uncached_data_word = RegEnable(s1_uncached_data_word, io.cpu.replay_next) val doUncachedResp = RegNext(io.cpu.replay_next) io.cpu.resp.valid := (s2_valid_hit_pre_data_ecc || doUncachedResp) && !s2_data_error io.cpu.replay_next := tl_out.d.fire && grantIsUncachedData && !cacheParams.separateUncachedResp.B when (doUncachedResp) { assert(!s2_valid_hit) io.cpu.resp.bits.replay := true.B io.cpu.resp.bits.addr := s2_uncached_resp_addr } io.cpu.uncached_resp.map { resp => resp.valid := tl_out.d.valid && grantIsUncachedData resp.bits.tag := uncachedResp.tag resp.bits.size := uncachedResp.size resp.bits.signed := uncachedResp.signed resp.bits.data := new LoadGen(uncachedResp.size, uncachedResp.signed, uncachedResp.addr, s1_uncached_data_word, false.B, wordBytes).data resp.bits.data_raw := s1_uncached_data_word when (grantIsUncachedData && !resp.ready) { tl_out.d.ready := false.B } } // load data subword mux/sign extension val s2_data_word = (0 until rowBits by wordBits).map(i => s2_data_uncorrected(wordBits+i-1,i)).reduce(_|_) val s2_data_word_corrected = (0 until rowBits by wordBits).map(i => s2_data_corrected(wordBits+i-1,i)).reduce(_|_) val s2_data_word_possibly_uncached = Mux(cacheParams.pipelineWayMux.B && doUncachedResp, s2_uncached_data_word, 0.U) | s2_data_word val loadgen = new LoadGen(s2_req.size, s2_req.signed, s2_req.addr, s2_data_word_possibly_uncached, s2_sc, wordBytes) io.cpu.resp.bits.data := loadgen.data | s2_sc_fail io.cpu.resp.bits.data_word_bypass := loadgen.wordData io.cpu.resp.bits.data_raw := s2_data_word io.cpu.resp.bits.store_data := pstore1_data // AMOs if (usingRMW) { val amoalus = (0 until coreDataBits / xLen).map { i => val amoalu = Module(new AMOALU(xLen)) amoalu.io.mask := pstore1_mask >> (i * xBytes) amoalu.io.cmd := (if (usingAtomicsInCache) pstore1_cmd else M_XWR) amoalu.io.lhs := s2_data_word >> (i * xLen) amoalu.io.rhs := pstore1_data >> (i * xLen) amoalu } pstore1_storegen_data := (if (!usingDataScratchpad) amoalus.map(_.io.out).asUInt else { val mask = FillInterleaved(8, Mux(s2_correct, 0.U, pstore1_mask)) 
amoalus.map(_.io.out_unmasked).asUInt & mask | s2_data_word_corrected & ~mask }) } else if (!usingAtomics) { assert(!(s1_valid_masked && s1_read && s1_write), "unsupported D$ operation") } if (coreParams.useVector) { edge.manager.managers.foreach { m => // Statically ensure that no-allocate accesses are permitted. // We could consider turning some of these into dynamic PMA checks. require(!m.supportsAcquireB || m.supportsGet, "With a vector unit, cacheable memory must support Get") require(!m.supportsAcquireT || m.supportsPutPartial, "With a vector unit, cacheable memory must support PutPartial") } } // flushes if (!usingDataScratchpad) when (RegNext(reset.asBool)) { resetting := true.B } val flushCounterNext = flushCounter +& 1.U val flushDone = (flushCounterNext >> log2Ceil(nSets)) === nWays.U val flushCounterWrap = flushCounterNext(log2Ceil(nSets)-1, 0) ccover(s2_valid_masked && s2_cmd_flush_all && s2_meta_error, "TAG_ECC_ERROR_DURING_FENCE_I", "D$ ECC error in tag array during cache flush") ccover(s2_valid_masked && s2_cmd_flush_all && s2_data_error, "DATA_ECC_ERROR_DURING_FENCE_I", "D$ ECC error in data array during cache flush") s1_flush_valid := metaArb.io.in(5).fire && !s1_flush_valid && !s2_flush_valid_pre_tag_ecc && release_state === s_ready && !release_ack_wait metaArb.io.in(5).valid := flushing && !flushed metaArb.io.in(5).bits.write := false.B metaArb.io.in(5).bits.idx := flushCounter(idxBits-1, 0) metaArb.io.in(5).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, metaArb.io.in(5).bits.idx << blockOffBits) metaArb.io.in(5).bits.way_en := metaArb.io.in(4).bits.way_en metaArb.io.in(5).bits.data := metaArb.io.in(4).bits.data // Only flush D$ on FENCE.I if some cached executable regions are untracked. if (supports_flush) { when (s2_valid_masked && s2_cmd_flush_all) { when (!flushed && !io.cpu.s2_kill && !release_ack_wait && !uncachedInFlight.asUInt.orR) { flushing := true.B flushing_req := s2_req } } when (tl_out_a.fire && !s2_uncached) { flushed := false.B } when (flushing) { s1_victim_way := flushCounter >> log2Up(nSets) when (s2_flush_valid) { flushCounter := flushCounterNext when (flushDone) { flushed := true.B if (!isPow2(nWays)) flushCounter := flushCounterWrap } } when (flushed && release_state === s_ready && !release_ack_wait) { flushing := false.B } } } metaArb.io.in(0).valid := resetting metaArb.io.in(0).bits := metaArb.io.in(5).bits metaArb.io.in(0).bits.write := true.B metaArb.io.in(0).bits.way_en := ~0.U(nWays.W) metaArb.io.in(0).bits.data := tECC.encode(L1Metadata(0.U, ClientMetadata.onReset).asUInt) when (resetting) { flushCounter := flushCounterNext when (flushDone) { resetting := false.B if (!isPow2(nWays)) flushCounter := flushCounterWrap } } // gate the clock clock_en_reg := !cacheParams.clockGate.B || io.ptw.customCSRs.disableDCacheClockGate || io.cpu.keep_clock_enabled || metaArb.io.out.valid || // subsumes resetting || flushing s1_probe || s2_probe || s1_valid || s2_valid || io.tlb_port.req.valid || s1_tlb_req_valid || s2_tlb_req_valid || pstore1_held || pstore2_valid || release_state =/= s_ready || release_ack_wait || !release_queue_empty || !tlb.io.req.ready || cached_grant_wait || uncachedInFlight.asUInt.orR || lrscCount > 0.U || blockProbeAfterGrantCount > 0.U // performance events io.cpu.perf.acquire := edge.done(tl_out_a) io.cpu.perf.release := edge.done(tl_out_c) io.cpu.perf.grant := tl_out.d.valid && d_last io.cpu.perf.tlbMiss := io.ptw.req.fire io.cpu.perf.storeBufferEmptyAfterLoad := !( (s1_valid && s1_write) || ((s2_valid && s2_write && 
!s2_waw_hazard) || pstore1_held) || pstore2_valid) io.cpu.perf.storeBufferEmptyAfterStore := !( (s1_valid && s1_write) || (s2_valid && s2_write && pstore1_rmw) || ((s2_valid && s2_write && !s2_waw_hazard || pstore1_held) && pstore2_valid)) io.cpu.perf.canAcceptStoreThenLoad := !( ((s2_valid && s2_write && pstore1_rmw) && (s1_valid && s1_write && !s1_waw_hazard)) || (pstore2_valid && pstore1_valid_likely && (s1_valid && s1_write))) io.cpu.perf.canAcceptStoreThenRMW := io.cpu.perf.canAcceptStoreThenLoad && !pstore2_valid io.cpu.perf.canAcceptLoadThenLoad := !((s1_valid && s1_write && needsRead(s1_req)) && ((s2_valid && s2_write && !s2_waw_hazard || pstore1_held) || pstore2_valid)) io.cpu.perf.blocked := { // stop reporting blocked just before unblocking to avoid overly conservative stalling val beatsBeforeEnd = outer.crossing match { case SynchronousCrossing(_) => 2 case RationalCrossing(_) => 1 // assumes 1 < ratio <= 2; need more bookkeeping for optimal handling of >2 case _: AsynchronousCrossing => 1 // likewise case _: CreditedCrossing => 1 // likewise } val near_end_of_refill = if (cacheBlockBytes / beatBytes <= beatsBeforeEnd) tl_out.d.valid else { val refill_count = RegInit(0.U((cacheBlockBytes / beatBytes).log2.W)) when (tl_out.d.fire && grantIsRefill) { refill_count := refill_count + 1.U } refill_count >= (cacheBlockBytes / beatBytes - beatsBeforeEnd).U } cached_grant_wait && !near_end_of_refill } // report errors val (data_error, data_error_uncorrectable, data_error_addr) = if (usingDataScratchpad) (s2_valid_data_error, s2_data_error_uncorrectable, s2_req.addr) else { (RegNext(tl_out_c.fire && inWriteback && s2_data_error), RegNext(s2_data_error_uncorrectable), probe_bits.address) // This is stable for a cycle after tl_out_c.fire, so don't need a register } { val error_addr = Mux(metaArb.io.in(1).valid, Cat(s2_first_meta_corrected.tag, metaArb.io.in(1).bits.addr(tagLSB-1, idxLSB)), data_error_addr >> idxLSB) << idxLSB io.errors.uncorrectable.foreach { u => u.valid := metaArb.io.in(1).valid && s2_meta_error_uncorrectable || data_error && data_error_uncorrectable u.bits := error_addr } io.errors.correctable.foreach { c => c.valid := metaArb.io.in(1).valid || data_error c.bits := error_addr io.errors.uncorrectable.foreach { u => when (u.valid) { c.valid := false.B } } } io.errors.bus.valid := tl_out.d.fire && (tl_out.d.bits.denied || tl_out.d.bits.corrupt) io.errors.bus.bits := Mux(grantIsCached, s2_req.addr >> idxLSB << idxLSB, 0.U) ccoverNotScratchpad(io.errors.bus.valid && grantIsCached, "D_ERROR_CACHED", "D$ D-channel error, cached") ccover(io.errors.bus.valid && !grantIsCached, "D_ERROR_UNCACHED", "D$ D-channel error, uncached") } if (usingDataScratchpad) { val data_error_cover = Seq( property.CoverBoolean(!data_error, Seq("no_data_error")), property.CoverBoolean(data_error && !data_error_uncorrectable, Seq("data_correctable_error")), property.CoverBoolean(data_error && data_error_uncorrectable, Seq("data_uncorrectable_error"))) val request_source = Seq( property.CoverBoolean(s2_isSlavePortAccess, Seq("from_TL")), property.CoverBoolean(!s2_isSlavePortAccess, Seq("from_CPU"))) property.cover(new property.CrossProperty( Seq(data_error_cover, request_source), Seq(), "MemorySystem;;Scratchpad Memory Bit Flip Cross Covers")) } else { val data_error_type = Seq( property.CoverBoolean(!s2_valid_data_error, Seq("no_data_error")), property.CoverBoolean(s2_valid_data_error && !s2_data_error_uncorrectable, Seq("data_correctable_error")), property.CoverBoolean(s2_valid_data_error && 
s2_data_error_uncorrectable, Seq("data_uncorrectable_error"))) val data_error_dirty = Seq( property.CoverBoolean(!s2_victim_dirty, Seq("data_clean")), property.CoverBoolean(s2_victim_dirty, Seq("data_dirty"))) val request_source = if (supports_flush) { Seq( property.CoverBoolean(!flushing, Seq("access")), property.CoverBoolean(flushing, Seq("during_flush"))) } else { Seq(property.CoverBoolean(true.B, Seq("never_flush"))) } val tag_error_cover = Seq( property.CoverBoolean( !s2_meta_error, Seq("no_tag_error")), property.CoverBoolean( s2_meta_error && !s2_meta_error_uncorrectable, Seq("tag_correctable_error")), property.CoverBoolean( s2_meta_error && s2_meta_error_uncorrectable, Seq("tag_uncorrectable_error"))) property.cover(new property.CrossProperty( Seq(data_error_type, data_error_dirty, request_source, tag_error_cover), Seq(), "MemorySystem;;Cache Memory Bit Flip Cross Covers")) } } // leaving gated-clock domain val dcacheImpl = withClock (gated_clock) { new DCacheModuleImpl } def encodeData(x: UInt, poison: Bool) = x.grouped(eccBits).map(dECC.encode(_, if (dECC.canDetect) poison else false.B)).asUInt def dummyEncodeData(x: UInt) = x.grouped(eccBits).map(dECC.swizzle(_)).asUInt def decodeData(x: UInt) = x.grouped(dECC.width(eccBits)).map(dECC.decode(_)) def eccMask(byteMask: UInt) = byteMask.grouped(eccBytes).map(_.orR).asUInt def eccByteMask(byteMask: UInt) = FillInterleaved(eccBytes, eccMask(byteMask)) def likelyNeedsRead(req: HellaCacheReq) = { val res = !req.cmd.isOneOf(M_XWR, M_PFW) || req.size < log2Ceil(eccBytes).U assert(!needsRead(req) || res) res } def needsRead(req: HellaCacheReq) = isRead(req.cmd) || (isWrite(req.cmd) && (req.cmd === M_PWR || req.size < log2Ceil(eccBytes).U)) def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = property.cover(cond, s"DCACHE_$label", "MemorySystem;;" + desc) def ccoverNotScratchpad(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = if (!usingDataScratchpad) ccover(cond, label, desc) require(!usingVM || tagLSB <= pgIdxBits, s"D$$ set size must not exceed ${1<<(pgIdxBits-10)} KiB; got ${(nSets * cacheBlockBytes)>>10} KiB") def tagLSB: Int = untagBits def probeIdx(b: TLBundleB): UInt = b.address(idxMSB, idxLSB) def addressToProbe(vaddr: UInt, paddr: UInt): TLBundleB = { val res = Wire(new TLBundleB(edge.bundle)) res :#= DontCare res.address := paddr res.source := (mmioOffset - 1).U res } def acquire(vaddr: UInt, paddr: UInt, param: UInt): TLBundleA = { if (!edge.manager.anySupportAcquireB) WireDefault(0.U.asTypeOf(new TLBundleA(edge.bundle))) else edge.AcquireBlock(0.U, paddr >> lgCacheBlockBytes << lgCacheBlockBytes, lgCacheBlockBytes.U, param)._2 } } File DescribedSRAM.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3.{Data, SyncReadMem, Vec} import chisel3.util.log2Ceil object DescribedSRAM { def apply[T <: Data]( name: String, desc: String, size: BigInt, // depth data: T ): SyncReadMem[T] = { val mem = SyncReadMem(size, data) mem.suggestName(name) val granWidth = data match { case v: Vec[_] => v.head.getWidth case d => d.getWidth } val uid = 0 Annotated.srams( component = mem, name = name, address_width = log2Ceil(size), data_width = data.getWidth, depth = size, description = desc, write_mask_granularity = granWidth ) mem } }
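Editor's note before the generated module: the Verilog that follows (DCacheDataArray_1) is the lowered form of the DescribedSRAM/SyncReadMem pattern above. As a point of reference only, here is a minimal, self-contained sketch of that pattern; the module name MaskedMemSketch, its port names, and the parameter values are invented for illustration and are not part of the design. With mutually exclusive read and write enables, the compiler can lower this to the single RW0 read-write port with RW0_wmask seen in the module below.

// Illustrative sketch only: a DescribedSRAM-backed SyncReadMem with a
// per-lane write mask. Names and parameters are assumptions, not taken
// from the generated design.
import chisel3._
import chisel3.util.log2Ceil
import freechips.rocketchip.util.DescribedSRAM

class MaskedMemSketch(nRows: Int = 512, nLanes: Int = 64) extends Module {
  val io = IO(new Bundle {
    val addr  = Input(UInt(log2Ceil(nRows).W))
    val write = Input(Bool())
    val wdata = Input(Vec(nLanes, UInt(8.W)))
    val wmask = Input(Vec(nLanes, Bool()))
    val rdata = Output(Vec(nLanes, UInt(8.W)))
  })
  // One SyncReadMem of Vec[UInt] with a per-lane mask; because the read and
  // write enables are mutually exclusive, the memory lowering can merge them
  // into a single read-write port (cf. RW0_addr/RW0_wmask below).
  val array = DescribedSRAM(
    name = "sketch_array",
    desc = "example masked SRAM",
    size = nRows,
    data = Vec(nLanes, UInt(8.W)))
  when (io.write) { array.write(io.addr, io.wdata, io.wmask) }
  io.rdata := array.read(io.addr, !io.write)
}

In the actual data array the 64 lanes correspond to 8 ways times 8 byte lanes per way, which is where the 512-bit RW0_wdata/RW0_rdata and 64-bit RW0_wmask in the module below come from.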
module DCacheDataArray_1( // @[DCache.scala:49:7] input clock, // @[DCache.scala:49:7] input reset, // @[DCache.scala:49:7] input io_req_valid, // @[DCache.scala:50:14] input [11:0] io_req_bits_addr, // @[DCache.scala:50:14] input io_req_bits_write, // @[DCache.scala:50:14] input [63:0] io_req_bits_wdata, // @[DCache.scala:50:14] input io_req_bits_wordMask, // @[DCache.scala:50:14] input [7:0] io_req_bits_eccMask, // @[DCache.scala:50:14] input [7:0] io_req_bits_way_en, // @[DCache.scala:50:14] output [63:0] io_resp_0, // @[DCache.scala:50:14] output [63:0] io_resp_1, // @[DCache.scala:50:14] output [63:0] io_resp_2, // @[DCache.scala:50:14] output [63:0] io_resp_3, // @[DCache.scala:50:14] output [63:0] io_resp_4, // @[DCache.scala:50:14] output [63:0] io_resp_5, // @[DCache.scala:50:14] output [63:0] io_resp_6, // @[DCache.scala:50:14] output [63:0] io_resp_7 // @[DCache.scala:50:14] ); wire [511:0] _rockettile_dcache_data_arrays_0_RW0_rdata; // @[DescribedSRAM.scala:17:26] wire io_req_valid_0 = io_req_valid; // @[DCache.scala:49:7] wire [11:0] io_req_bits_addr_0 = io_req_bits_addr; // @[DCache.scala:49:7] wire io_req_bits_write_0 = io_req_bits_write; // @[DCache.scala:49:7] wire [63:0] io_req_bits_wdata_0 = io_req_bits_wdata; // @[DCache.scala:49:7] wire io_req_bits_wordMask_0 = io_req_bits_wordMask; // @[DCache.scala:49:7] wire [7:0] io_req_bits_eccMask_0 = io_req_bits_eccMask; // @[DCache.scala:49:7] wire [7:0] io_req_bits_way_en_0 = io_req_bits_way_en; // @[DCache.scala:49:7] wire _rdata_valid_T_1 = 1'h1; // @[DCache.scala:71:60] wire rdata_valid = io_req_valid_0; // @[DCache.scala:49:7, :71:30] wire [63:0] wWords_0 = io_req_bits_wdata_0; // @[package.scala:211:50] wire _rdata_valid_T = io_req_bits_wordMask_0; // @[DCache.scala:49:7, :71:83] wire [63:0] rdata_0_0; // @[package.scala:45:27] wire [63:0] rdata_0_1; // @[package.scala:45:27] wire [63:0] rdata_0_2; // @[package.scala:45:27] wire [63:0] rdata_0_3; // @[package.scala:45:27] wire [63:0] rdata_0_4; // @[package.scala:45:27] wire [63:0] rdata_0_5; // @[package.scala:45:27] wire [63:0] rdata_0_6; // @[package.scala:45:27] wire [63:0] rdata_0_7; // @[package.scala:45:27] wire [63:0] io_resp_0_0; // @[DCache.scala:49:7] wire [63:0] io_resp_1_0; // @[DCache.scala:49:7] wire [63:0] io_resp_2_0; // @[DCache.scala:49:7] wire [63:0] io_resp_3_0; // @[DCache.scala:49:7] wire [63:0] io_resp_4_0; // @[DCache.scala:49:7] wire [63:0] io_resp_5_0; // @[DCache.scala:49:7] wire [63:0] io_resp_6_0; // @[DCache.scala:49:7] wire [63:0] io_resp_7_0; // @[DCache.scala:49:7] wire eccMask_0 = io_req_bits_eccMask_0[0]; // @[DCache.scala:49:7, :56:82] wire eccMask_1 = io_req_bits_eccMask_0[1]; // @[DCache.scala:49:7, :56:82] wire eccMask_2 = io_req_bits_eccMask_0[2]; // @[DCache.scala:49:7, :56:82] wire eccMask_3 = io_req_bits_eccMask_0[3]; // @[DCache.scala:49:7, :56:82] wire eccMask_4 = io_req_bits_eccMask_0[4]; // @[DCache.scala:49:7, :56:82] wire eccMask_5 = io_req_bits_eccMask_0[5]; // @[DCache.scala:49:7, :56:82] wire eccMask_6 = io_req_bits_eccMask_0[6]; // @[DCache.scala:49:7, :56:82] wire eccMask_7 = io_req_bits_eccMask_0[7]; // @[DCache.scala:49:7, :56:82] wire _wMask_T = io_req_bits_way_en_0[0]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_1 = io_req_bits_way_en_0[0]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_2 = io_req_bits_way_en_0[0]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_3 = io_req_bits_way_en_0[0]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_4 = io_req_bits_way_en_0[0]; // @[DCache.scala:49:7, :57:108] wire 
_wMask_T_5 = io_req_bits_way_en_0[0]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_6 = io_req_bits_way_en_0[0]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_7 = io_req_bits_way_en_0[0]; // @[DCache.scala:49:7, :57:108] wire wMask_0 = eccMask_0 & _wMask_T; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_1 = eccMask_1 & _wMask_T_1; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_2 = eccMask_2 & _wMask_T_2; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_3 = eccMask_3 & _wMask_T_3; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_4 = eccMask_4 & _wMask_T_4; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_5 = eccMask_5 & _wMask_T_5; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_6 = eccMask_6 & _wMask_T_6; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_7 = eccMask_7 & _wMask_T_7; // @[DCache.scala:56:82, :57:{87,108}] wire _wMask_T_8 = io_req_bits_way_en_0[1]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_9 = io_req_bits_way_en_0[1]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_10 = io_req_bits_way_en_0[1]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_11 = io_req_bits_way_en_0[1]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_12 = io_req_bits_way_en_0[1]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_13 = io_req_bits_way_en_0[1]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_14 = io_req_bits_way_en_0[1]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_15 = io_req_bits_way_en_0[1]; // @[DCache.scala:49:7, :57:108] wire wMask_8 = eccMask_0 & _wMask_T_8; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_9 = eccMask_1 & _wMask_T_9; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_10 = eccMask_2 & _wMask_T_10; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_11 = eccMask_3 & _wMask_T_11; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_12 = eccMask_4 & _wMask_T_12; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_13 = eccMask_5 & _wMask_T_13; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_14 = eccMask_6 & _wMask_T_14; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_15 = eccMask_7 & _wMask_T_15; // @[DCache.scala:56:82, :57:{87,108}] wire _wMask_T_16 = io_req_bits_way_en_0[2]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_17 = io_req_bits_way_en_0[2]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_18 = io_req_bits_way_en_0[2]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_19 = io_req_bits_way_en_0[2]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_20 = io_req_bits_way_en_0[2]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_21 = io_req_bits_way_en_0[2]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_22 = io_req_bits_way_en_0[2]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_23 = io_req_bits_way_en_0[2]; // @[DCache.scala:49:7, :57:108] wire wMask_16 = eccMask_0 & _wMask_T_16; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_17 = eccMask_1 & _wMask_T_17; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_18 = eccMask_2 & _wMask_T_18; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_19 = eccMask_3 & _wMask_T_19; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_20 = eccMask_4 & _wMask_T_20; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_21 = eccMask_5 & _wMask_T_21; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_22 = eccMask_6 & _wMask_T_22; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_23 = eccMask_7 & _wMask_T_23; // @[DCache.scala:56:82, :57:{87,108}] wire _wMask_T_24 = io_req_bits_way_en_0[3]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_25 = io_req_bits_way_en_0[3]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_26 = io_req_bits_way_en_0[3]; // 
@[DCache.scala:49:7, :57:108] wire _wMask_T_27 = io_req_bits_way_en_0[3]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_28 = io_req_bits_way_en_0[3]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_29 = io_req_bits_way_en_0[3]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_30 = io_req_bits_way_en_0[3]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_31 = io_req_bits_way_en_0[3]; // @[DCache.scala:49:7, :57:108] wire wMask_24 = eccMask_0 & _wMask_T_24; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_25 = eccMask_1 & _wMask_T_25; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_26 = eccMask_2 & _wMask_T_26; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_27 = eccMask_3 & _wMask_T_27; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_28 = eccMask_4 & _wMask_T_28; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_29 = eccMask_5 & _wMask_T_29; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_30 = eccMask_6 & _wMask_T_30; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_31 = eccMask_7 & _wMask_T_31; // @[DCache.scala:56:82, :57:{87,108}] wire _wMask_T_32 = io_req_bits_way_en_0[4]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_33 = io_req_bits_way_en_0[4]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_34 = io_req_bits_way_en_0[4]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_35 = io_req_bits_way_en_0[4]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_36 = io_req_bits_way_en_0[4]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_37 = io_req_bits_way_en_0[4]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_38 = io_req_bits_way_en_0[4]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_39 = io_req_bits_way_en_0[4]; // @[DCache.scala:49:7, :57:108] wire wMask_32 = eccMask_0 & _wMask_T_32; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_33 = eccMask_1 & _wMask_T_33; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_34 = eccMask_2 & _wMask_T_34; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_35 = eccMask_3 & _wMask_T_35; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_36 = eccMask_4 & _wMask_T_36; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_37 = eccMask_5 & _wMask_T_37; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_38 = eccMask_6 & _wMask_T_38; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_39 = eccMask_7 & _wMask_T_39; // @[DCache.scala:56:82, :57:{87,108}] wire _wMask_T_40 = io_req_bits_way_en_0[5]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_41 = io_req_bits_way_en_0[5]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_42 = io_req_bits_way_en_0[5]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_43 = io_req_bits_way_en_0[5]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_44 = io_req_bits_way_en_0[5]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_45 = io_req_bits_way_en_0[5]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_46 = io_req_bits_way_en_0[5]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_47 = io_req_bits_way_en_0[5]; // @[DCache.scala:49:7, :57:108] wire wMask_40 = eccMask_0 & _wMask_T_40; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_41 = eccMask_1 & _wMask_T_41; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_42 = eccMask_2 & _wMask_T_42; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_43 = eccMask_3 & _wMask_T_43; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_44 = eccMask_4 & _wMask_T_44; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_45 = eccMask_5 & _wMask_T_45; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_46 = eccMask_6 & _wMask_T_46; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_47 = eccMask_7 & _wMask_T_47; // @[DCache.scala:56:82, 
:57:{87,108}] wire _wMask_T_48 = io_req_bits_way_en_0[6]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_49 = io_req_bits_way_en_0[6]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_50 = io_req_bits_way_en_0[6]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_51 = io_req_bits_way_en_0[6]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_52 = io_req_bits_way_en_0[6]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_53 = io_req_bits_way_en_0[6]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_54 = io_req_bits_way_en_0[6]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_55 = io_req_bits_way_en_0[6]; // @[DCache.scala:49:7, :57:108] wire wMask_48 = eccMask_0 & _wMask_T_48; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_49 = eccMask_1 & _wMask_T_49; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_50 = eccMask_2 & _wMask_T_50; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_51 = eccMask_3 & _wMask_T_51; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_52 = eccMask_4 & _wMask_T_52; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_53 = eccMask_5 & _wMask_T_53; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_54 = eccMask_6 & _wMask_T_54; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_55 = eccMask_7 & _wMask_T_55; // @[DCache.scala:56:82, :57:{87,108}] wire _wMask_T_56 = io_req_bits_way_en_0[7]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_57 = io_req_bits_way_en_0[7]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_58 = io_req_bits_way_en_0[7]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_59 = io_req_bits_way_en_0[7]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_60 = io_req_bits_way_en_0[7]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_61 = io_req_bits_way_en_0[7]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_62 = io_req_bits_way_en_0[7]; // @[DCache.scala:49:7, :57:108] wire _wMask_T_63 = io_req_bits_way_en_0[7]; // @[DCache.scala:49:7, :57:108] wire wMask_56 = eccMask_0 & _wMask_T_56; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_57 = eccMask_1 & _wMask_T_57; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_58 = eccMask_2 & _wMask_T_58; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_59 = eccMask_3 & _wMask_T_59; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_60 = eccMask_4 & _wMask_T_60; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_61 = eccMask_5 & _wMask_T_61; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_62 = eccMask_6 & _wMask_T_62; // @[DCache.scala:56:82, :57:{87,108}] wire wMask_63 = eccMask_7 & _wMask_T_63; // @[DCache.scala:56:82, :57:{87,108}] wire [8:0] addr = io_req_bits_addr_0[11:3]; // @[DCache.scala:49:7, :59:31] wire [8:0] _rdata_data_WIRE = addr; // @[DCache.scala:59:31, :77:26] wire _rdata_T; // @[DCache.scala:72:17] wire _rdata_data_T_1; // @[DCache.scala:77:39] wire [7:0] _rdata_WIRE_0; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_1; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_2; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_3; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_4; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_5; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_6; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_7; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_8; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_9; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_10; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_11; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_12; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_13; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_14; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_15; 
// @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_16; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_17; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_18; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_19; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_20; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_21; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_22; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_23; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_24; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_25; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_26; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_27; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_28; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_29; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_30; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_31; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_32; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_33; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_34; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_35; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_36; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_37; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_38; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_39; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_40; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_41; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_42; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_43; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_44; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_45; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_46; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_47; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_48; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_49; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_50; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_51; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_52; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_53; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_54; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_55; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_56; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_57; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_58; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_59; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_60; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_61; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_62; // @[DCache.scala:75:32] wire [7:0] _rdata_WIRE_63; // @[DCache.scala:75:32] assign _rdata_T = rdata_valid & io_req_bits_write_0; // @[DCache.scala:49:7, :71:30, :72:17] wire [7:0] rdata_wData_0 = wWords_0[7:0]; // @[package.scala:211:50] assign _rdata_WIRE_0 = rdata_wData_0; // @[package.scala:211:50] assign _rdata_WIRE_8 = rdata_wData_0; // @[package.scala:211:50] assign _rdata_WIRE_16 = rdata_wData_0; // @[package.scala:211:50] assign _rdata_WIRE_24 = rdata_wData_0; // @[package.scala:211:50] assign _rdata_WIRE_32 = rdata_wData_0; // @[package.scala:211:50] assign _rdata_WIRE_40 = rdata_wData_0; // @[package.scala:211:50] assign _rdata_WIRE_48 = rdata_wData_0; // @[package.scala:211:50] assign _rdata_WIRE_56 = rdata_wData_0; // @[package.scala:211:50] wire [7:0] rdata_wData_1 = wWords_0[15:8]; // @[package.scala:211:50] assign _rdata_WIRE_1 = rdata_wData_1; // @[package.scala:211:50] assign _rdata_WIRE_9 = rdata_wData_1; // @[package.scala:211:50] assign _rdata_WIRE_17 = rdata_wData_1; // @[package.scala:211:50] assign _rdata_WIRE_25 = rdata_wData_1; // @[package.scala:211:50] assign 
_rdata_WIRE_33 = rdata_wData_1; // @[package.scala:211:50] assign _rdata_WIRE_41 = rdata_wData_1; // @[package.scala:211:50] assign _rdata_WIRE_49 = rdata_wData_1; // @[package.scala:211:50] assign _rdata_WIRE_57 = rdata_wData_1; // @[package.scala:211:50] wire [7:0] rdata_wData_2 = wWords_0[23:16]; // @[package.scala:211:50] assign _rdata_WIRE_2 = rdata_wData_2; // @[package.scala:211:50] assign _rdata_WIRE_10 = rdata_wData_2; // @[package.scala:211:50] assign _rdata_WIRE_18 = rdata_wData_2; // @[package.scala:211:50] assign _rdata_WIRE_26 = rdata_wData_2; // @[package.scala:211:50] assign _rdata_WIRE_34 = rdata_wData_2; // @[package.scala:211:50] assign _rdata_WIRE_42 = rdata_wData_2; // @[package.scala:211:50] assign _rdata_WIRE_50 = rdata_wData_2; // @[package.scala:211:50] assign _rdata_WIRE_58 = rdata_wData_2; // @[package.scala:211:50] wire [7:0] rdata_wData_3 = wWords_0[31:24]; // @[package.scala:211:50] assign _rdata_WIRE_3 = rdata_wData_3; // @[package.scala:211:50] assign _rdata_WIRE_11 = rdata_wData_3; // @[package.scala:211:50] assign _rdata_WIRE_19 = rdata_wData_3; // @[package.scala:211:50] assign _rdata_WIRE_27 = rdata_wData_3; // @[package.scala:211:50] assign _rdata_WIRE_35 = rdata_wData_3; // @[package.scala:211:50] assign _rdata_WIRE_43 = rdata_wData_3; // @[package.scala:211:50] assign _rdata_WIRE_51 = rdata_wData_3; // @[package.scala:211:50] assign _rdata_WIRE_59 = rdata_wData_3; // @[package.scala:211:50] wire [7:0] rdata_wData_4 = wWords_0[39:32]; // @[package.scala:211:50] assign _rdata_WIRE_4 = rdata_wData_4; // @[package.scala:211:50] assign _rdata_WIRE_12 = rdata_wData_4; // @[package.scala:211:50] assign _rdata_WIRE_20 = rdata_wData_4; // @[package.scala:211:50] assign _rdata_WIRE_28 = rdata_wData_4; // @[package.scala:211:50] assign _rdata_WIRE_36 = rdata_wData_4; // @[package.scala:211:50] assign _rdata_WIRE_44 = rdata_wData_4; // @[package.scala:211:50] assign _rdata_WIRE_52 = rdata_wData_4; // @[package.scala:211:50] assign _rdata_WIRE_60 = rdata_wData_4; // @[package.scala:211:50] wire [7:0] rdata_wData_5 = wWords_0[47:40]; // @[package.scala:211:50] assign _rdata_WIRE_5 = rdata_wData_5; // @[package.scala:211:50] assign _rdata_WIRE_13 = rdata_wData_5; // @[package.scala:211:50] assign _rdata_WIRE_21 = rdata_wData_5; // @[package.scala:211:50] assign _rdata_WIRE_29 = rdata_wData_5; // @[package.scala:211:50] assign _rdata_WIRE_37 = rdata_wData_5; // @[package.scala:211:50] assign _rdata_WIRE_45 = rdata_wData_5; // @[package.scala:211:50] assign _rdata_WIRE_53 = rdata_wData_5; // @[package.scala:211:50] assign _rdata_WIRE_61 = rdata_wData_5; // @[package.scala:211:50] wire [7:0] rdata_wData_6 = wWords_0[55:48]; // @[package.scala:211:50] assign _rdata_WIRE_6 = rdata_wData_6; // @[package.scala:211:50] assign _rdata_WIRE_14 = rdata_wData_6; // @[package.scala:211:50] assign _rdata_WIRE_22 = rdata_wData_6; // @[package.scala:211:50] assign _rdata_WIRE_30 = rdata_wData_6; // @[package.scala:211:50] assign _rdata_WIRE_38 = rdata_wData_6; // @[package.scala:211:50] assign _rdata_WIRE_46 = rdata_wData_6; // @[package.scala:211:50] assign _rdata_WIRE_54 = rdata_wData_6; // @[package.scala:211:50] assign _rdata_WIRE_62 = rdata_wData_6; // @[package.scala:211:50] wire [7:0] rdata_wData_7 = wWords_0[63:56]; // @[package.scala:211:50] assign _rdata_WIRE_7 = rdata_wData_7; // @[package.scala:211:50] assign _rdata_WIRE_15 = rdata_wData_7; // @[package.scala:211:50] assign _rdata_WIRE_23 = rdata_wData_7; // @[package.scala:211:50] assign _rdata_WIRE_31 = rdata_wData_7; 
// @[package.scala:211:50] assign _rdata_WIRE_39 = rdata_wData_7; // @[package.scala:211:50] assign _rdata_WIRE_47 = rdata_wData_7; // @[package.scala:211:50] assign _rdata_WIRE_55 = rdata_wData_7; // @[package.scala:211:50] assign _rdata_WIRE_63 = rdata_wData_7; // @[package.scala:211:50] wire _rdata_data_T = ~io_req_bits_write_0; // @[DCache.scala:49:7, :77:42] assign _rdata_data_T_1 = rdata_valid & _rdata_data_T; // @[DCache.scala:71:30, :77:{39,42}] wire [15:0] rdata_lo_lo = _rockettile_dcache_data_arrays_0_RW0_rdata[15:0]; // @[package.scala:45:27] wire [15:0] rdata_lo_hi = _rockettile_dcache_data_arrays_0_RW0_rdata[31:16]; // @[package.scala:45:27] wire [31:0] rdata_lo = {rdata_lo_hi, rdata_lo_lo}; // @[package.scala:45:27] wire [15:0] rdata_hi_lo = _rockettile_dcache_data_arrays_0_RW0_rdata[47:32]; // @[package.scala:45:27] wire [15:0] rdata_hi_hi = _rockettile_dcache_data_arrays_0_RW0_rdata[63:48]; // @[package.scala:45:27] wire [31:0] rdata_hi = {rdata_hi_hi, rdata_hi_lo}; // @[package.scala:45:27] assign rdata_0_0 = {rdata_hi, rdata_lo}; // @[package.scala:45:27] assign io_resp_0_0 = rdata_0_0; // @[package.scala:45:27] wire [15:0] rdata_lo_lo_1 = _rockettile_dcache_data_arrays_0_RW0_rdata[79:64]; // @[package.scala:45:27] wire [15:0] rdata_lo_hi_1 = _rockettile_dcache_data_arrays_0_RW0_rdata[95:80]; // @[package.scala:45:27] wire [31:0] rdata_lo_1 = {rdata_lo_hi_1, rdata_lo_lo_1}; // @[package.scala:45:27] wire [15:0] rdata_hi_lo_1 = _rockettile_dcache_data_arrays_0_RW0_rdata[111:96]; // @[package.scala:45:27] wire [15:0] rdata_hi_hi_1 = _rockettile_dcache_data_arrays_0_RW0_rdata[127:112]; // @[package.scala:45:27] wire [31:0] rdata_hi_1 = {rdata_hi_hi_1, rdata_hi_lo_1}; // @[package.scala:45:27] assign rdata_0_1 = {rdata_hi_1, rdata_lo_1}; // @[package.scala:45:27] assign io_resp_1_0 = rdata_0_1; // @[package.scala:45:27] wire [15:0] rdata_lo_lo_2 = _rockettile_dcache_data_arrays_0_RW0_rdata[143:128]; // @[package.scala:45:27] wire [15:0] rdata_lo_hi_2 = _rockettile_dcache_data_arrays_0_RW0_rdata[159:144]; // @[package.scala:45:27] wire [31:0] rdata_lo_2 = {rdata_lo_hi_2, rdata_lo_lo_2}; // @[package.scala:45:27] wire [15:0] rdata_hi_lo_2 = _rockettile_dcache_data_arrays_0_RW0_rdata[175:160]; // @[package.scala:45:27] wire [15:0] rdata_hi_hi_2 = _rockettile_dcache_data_arrays_0_RW0_rdata[191:176]; // @[package.scala:45:27] wire [31:0] rdata_hi_2 = {rdata_hi_hi_2, rdata_hi_lo_2}; // @[package.scala:45:27] assign rdata_0_2 = {rdata_hi_2, rdata_lo_2}; // @[package.scala:45:27] assign io_resp_2_0 = rdata_0_2; // @[package.scala:45:27] wire [15:0] rdata_lo_lo_3 = _rockettile_dcache_data_arrays_0_RW0_rdata[207:192]; // @[package.scala:45:27] wire [15:0] rdata_lo_hi_3 = _rockettile_dcache_data_arrays_0_RW0_rdata[223:208]; // @[package.scala:45:27] wire [31:0] rdata_lo_3 = {rdata_lo_hi_3, rdata_lo_lo_3}; // @[package.scala:45:27] wire [15:0] rdata_hi_lo_3 = _rockettile_dcache_data_arrays_0_RW0_rdata[239:224]; // @[package.scala:45:27] wire [15:0] rdata_hi_hi_3 = _rockettile_dcache_data_arrays_0_RW0_rdata[255:240]; // @[package.scala:45:27] wire [31:0] rdata_hi_3 = {rdata_hi_hi_3, rdata_hi_lo_3}; // @[package.scala:45:27] assign rdata_0_3 = {rdata_hi_3, rdata_lo_3}; // @[package.scala:45:27] assign io_resp_3_0 = rdata_0_3; // @[package.scala:45:27] wire [15:0] rdata_lo_lo_4 = _rockettile_dcache_data_arrays_0_RW0_rdata[271:256]; // @[package.scala:45:27] wire [15:0] rdata_lo_hi_4 = _rockettile_dcache_data_arrays_0_RW0_rdata[287:272]; // @[package.scala:45:27] wire [31:0] rdata_lo_4 = 
{rdata_lo_hi_4, rdata_lo_lo_4}; // @[package.scala:45:27] wire [15:0] rdata_hi_lo_4 = _rockettile_dcache_data_arrays_0_RW0_rdata[303:288]; // @[package.scala:45:27] wire [15:0] rdata_hi_hi_4 = _rockettile_dcache_data_arrays_0_RW0_rdata[319:304]; // @[package.scala:45:27] wire [31:0] rdata_hi_4 = {rdata_hi_hi_4, rdata_hi_lo_4}; // @[package.scala:45:27] assign rdata_0_4 = {rdata_hi_4, rdata_lo_4}; // @[package.scala:45:27] assign io_resp_4_0 = rdata_0_4; // @[package.scala:45:27] wire [15:0] rdata_lo_lo_5 = _rockettile_dcache_data_arrays_0_RW0_rdata[335:320]; // @[package.scala:45:27] wire [15:0] rdata_lo_hi_5 = _rockettile_dcache_data_arrays_0_RW0_rdata[351:336]; // @[package.scala:45:27] wire [31:0] rdata_lo_5 = {rdata_lo_hi_5, rdata_lo_lo_5}; // @[package.scala:45:27] wire [15:0] rdata_hi_lo_5 = _rockettile_dcache_data_arrays_0_RW0_rdata[367:352]; // @[package.scala:45:27] wire [15:0] rdata_hi_hi_5 = _rockettile_dcache_data_arrays_0_RW0_rdata[383:368]; // @[package.scala:45:27] wire [31:0] rdata_hi_5 = {rdata_hi_hi_5, rdata_hi_lo_5}; // @[package.scala:45:27] assign rdata_0_5 = {rdata_hi_5, rdata_lo_5}; // @[package.scala:45:27] assign io_resp_5_0 = rdata_0_5; // @[package.scala:45:27] wire [15:0] rdata_lo_lo_6 = _rockettile_dcache_data_arrays_0_RW0_rdata[399:384]; // @[package.scala:45:27] wire [15:0] rdata_lo_hi_6 = _rockettile_dcache_data_arrays_0_RW0_rdata[415:400]; // @[package.scala:45:27] wire [31:0] rdata_lo_6 = {rdata_lo_hi_6, rdata_lo_lo_6}; // @[package.scala:45:27] wire [15:0] rdata_hi_lo_6 = _rockettile_dcache_data_arrays_0_RW0_rdata[431:416]; // @[package.scala:45:27] wire [15:0] rdata_hi_hi_6 = _rockettile_dcache_data_arrays_0_RW0_rdata[447:432]; // @[package.scala:45:27] wire [31:0] rdata_hi_6 = {rdata_hi_hi_6, rdata_hi_lo_6}; // @[package.scala:45:27] assign rdata_0_6 = {rdata_hi_6, rdata_lo_6}; // @[package.scala:45:27] assign io_resp_6_0 = rdata_0_6; // @[package.scala:45:27] wire [15:0] rdata_lo_lo_7 = _rockettile_dcache_data_arrays_0_RW0_rdata[463:448]; // @[package.scala:45:27] wire [15:0] rdata_lo_hi_7 = _rockettile_dcache_data_arrays_0_RW0_rdata[479:464]; // @[package.scala:45:27] wire [31:0] rdata_lo_7 = {rdata_lo_hi_7, rdata_lo_lo_7}; // @[package.scala:45:27] wire [15:0] rdata_hi_lo_7 = _rockettile_dcache_data_arrays_0_RW0_rdata[495:480]; // @[package.scala:45:27] wire [15:0] rdata_hi_hi_7 = _rockettile_dcache_data_arrays_0_RW0_rdata[511:496]; // @[package.scala:45:27] wire [31:0] rdata_hi_7 = {rdata_hi_hi_7, rdata_hi_lo_7}; // @[package.scala:45:27] assign rdata_0_7 = {rdata_hi_7, rdata_lo_7}; // @[package.scala:45:27] assign io_resp_7_0 = rdata_0_7; // @[package.scala:45:27] rockettile_dcache_data_arrays_0_0 rockettile_dcache_data_arrays_0 ( // @[DescribedSRAM.scala:17:26] .RW0_addr (_rdata_T ? 
addr : _rdata_data_WIRE), // @[DescribedSRAM.scala:17:26] .RW0_en (_rdata_data_T_1 | _rdata_T), // @[DescribedSRAM.scala:17:26] .RW0_clk (clock), .RW0_wmode (io_req_bits_write_0), // @[DCache.scala:49:7] .RW0_wdata ({_rdata_WIRE_63, _rdata_WIRE_62, _rdata_WIRE_61, _rdata_WIRE_60, _rdata_WIRE_59, _rdata_WIRE_58, _rdata_WIRE_57, _rdata_WIRE_56, _rdata_WIRE_55, _rdata_WIRE_54, _rdata_WIRE_53, _rdata_WIRE_52, _rdata_WIRE_51, _rdata_WIRE_50, _rdata_WIRE_49, _rdata_WIRE_48, _rdata_WIRE_47, _rdata_WIRE_46, _rdata_WIRE_45, _rdata_WIRE_44, _rdata_WIRE_43, _rdata_WIRE_42, _rdata_WIRE_41, _rdata_WIRE_40, _rdata_WIRE_39, _rdata_WIRE_38, _rdata_WIRE_37, _rdata_WIRE_36, _rdata_WIRE_35, _rdata_WIRE_34, _rdata_WIRE_33, _rdata_WIRE_32, _rdata_WIRE_31, _rdata_WIRE_30, _rdata_WIRE_29, _rdata_WIRE_28, _rdata_WIRE_27, _rdata_WIRE_26, _rdata_WIRE_25, _rdata_WIRE_24, _rdata_WIRE_23, _rdata_WIRE_22, _rdata_WIRE_21, _rdata_WIRE_20, _rdata_WIRE_19, _rdata_WIRE_18, _rdata_WIRE_17, _rdata_WIRE_16, _rdata_WIRE_15, _rdata_WIRE_14, _rdata_WIRE_13, _rdata_WIRE_12, _rdata_WIRE_11, _rdata_WIRE_10, _rdata_WIRE_9, _rdata_WIRE_8, _rdata_WIRE_7, _rdata_WIRE_6, _rdata_WIRE_5, _rdata_WIRE_4, _rdata_WIRE_3, _rdata_WIRE_2, _rdata_WIRE_1, _rdata_WIRE_0}), // @[DescribedSRAM.scala:17:26] .RW0_rdata (_rockettile_dcache_data_arrays_0_RW0_rdata), .RW0_wmask ({wMask_63, wMask_62, wMask_61, wMask_60, wMask_59, wMask_58, wMask_57, wMask_56, wMask_55, wMask_54, wMask_53, wMask_52, wMask_51, wMask_50, wMask_49, wMask_48, wMask_47, wMask_46, wMask_45, wMask_44, wMask_43, wMask_42, wMask_41, wMask_40, wMask_39, wMask_38, wMask_37, wMask_36, wMask_35, wMask_34, wMask_33, wMask_32, wMask_31, wMask_30, wMask_29, wMask_28, wMask_27, wMask_26, wMask_25, wMask_24, wMask_23, wMask_22, wMask_21, wMask_20, wMask_19, wMask_18, wMask_17, wMask_16, wMask_15, wMask_14, wMask_13, wMask_12, wMask_11, wMask_10, wMask_9, wMask_8, wMask_7, wMask_6, wMask_5, wMask_4, wMask_3, wMask_2, wMask_1, wMask_0}) // @[DescribedSRAM.scala:17:26] ); // @[DescribedSRAM.scala:17:26] assign io_resp_0 = io_resp_0_0; // @[DCache.scala:49:7] assign io_resp_1 = io_resp_1_0; // @[DCache.scala:49:7] assign io_resp_2 = io_resp_2_0; // @[DCache.scala:49:7] assign io_resp_3 = io_resp_3_0; // @[DCache.scala:49:7] assign io_resp_4 = io_resp_4_0; // @[DCache.scala:49:7] assign io_resp_5 = io_resp_5_0; // @[DCache.scala:49:7] assign io_resp_6 = io_resp_6_0; // @[DCache.scala:49:7] assign io_resp_7 = io_resp_7_0; // @[DCache.scala:49:7] endmodule
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
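// A minimal sketch of the denied/corrupt invariants asserted for 'D' channel responses in
// legalizeFormatD (it leaves out the separate mayDenyGet/mayDenyPut checks): a data-carrying
// response may be denied only if it is also marked corrupt, and a response without data may
// never be corrupt.
object DeniedCorruptSketch extends App {
  def dBeatLegal(hasData: Boolean, denied: Boolean, corrupt: Boolean): Boolean =
    if (hasData) !denied || corrupt else !corrupt
  assert(dBeatLegal(hasData = true, denied = true, corrupt = true))    // denied GrantData: ok if corrupt
  assert(!dBeatLegal(hasData = true, denied = true, corrupt = false))  // denied but clean data: illegal
  assert(!dBeatLegal(hasData = false, denied = false, corrupt = true)) // corrupt Grant/AccessAck: illegal
}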
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
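// A simplified software model of the per-source response tracking used above for the symbolic
// source (it omits the same-cycle request/response case the RTL handles explicitly): at most one
// A=>D exchange may be outstanding per source ID at a time.
final class PendingModelSketch {
  private var pending = false
  def onAFirstBeat(): Unit = { require(!pending, "source ID re-used while a response is pending"); pending = true }
  def onDFirstBeat(): Unit = { require(pending, "response for nothing in flight"); pending = false }
}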
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
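// A minimal sketch of the per-source opcode bookkeeping used by legalizeADSource, assuming a
// 4-bit slot per source ID (3 opcode bits plus a "valid" LSB, so an all-zero slot means nothing
// is in flight); inflight_sizes uses the same scheme with size-width slots.
object OpcodeSlotSketch extends App {
  val slotBits = 4
  def set(packed: BigInt, source: Int, opcode: Int): BigInt =
    packed | (BigInt((opcode << 1) | 1) << (source * slotBits))
  def lookup(packed: BigInt, source: Int): Int =
    (((packed >> (source * slotBits)) & BigInt((1 << slotBits) - 1)) >> 1).toInt
  val st = set(0, source = 2, opcode = 4)              // e.g. a Get (opcode 4) from source 2
  assert(lookup(st, source = 2) == 4 && lookup(st, source = 0) == 0)
}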
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
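// A small software model of the partition/PriorityEncoder idiom used by Random above, assuming a
// 16-bit random value: the value range is cut into `mod` nearly equal buckets, and the index of
// the first threshold exceeding the value selects the bucket.
object RandomBucketSketch extends App {
  def bucket(value: Int, mod: Int, width: Int = 16): Int = {
    val thresholds = Seq.tabulate(mod)(i => ((i + 1).toLong << width) / mod)
    thresholds.indexWhere(value < _)
  }
  assert(bucket(0, 3) == 0 && bucket(65535, 3) == 2)
}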
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
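// A software model of the staged rotation used by the rotate/rotateRight(n: UInt) variants here,
// assuming a power-of-two sized sequence: stage i applies a fixed rotation by 2^i only when bit i
// of the amount is set, so any amount is reached with log2(size) levels of muxing.
object RotateSketch extends App {
  def rotateLeftBy[T](xs: Vector[T], n: Int): Vector[T] =
    (0 until Integer.numberOfTrailingZeros(xs.size)).foldLeft(xs) { (r, i) =>
      if (((n >> i) & 1) == 1) r.drop(1 << i) ++ r.take(1 << i) else r
    }
  assert(rotateLeftBy(Vector(0, 1, 2, 3), 3) == Vector(3, 0, 1, 2))
}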
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
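// A small software model of leftOR above (rightOR is the mirror image): OR-ing the value with
// progressively doubled shifted copies of itself smears every set bit toward the high end in
// log2(width) steps.
object LeftOrSketch extends App {
  def leftOrModel(x: Int, width: Int): Int = {
    var (v, s) = (x, 1)
    while (s < width) { v |= (v << s) & ((1 << width) - 1); s += s }
    v
  }
  assert(leftOrModel(0x04, 8) == 0xFC) // one bit at position 2 fills bits 7 down to 2
}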
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
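// A minimal sketch of the ordered-encoding convention used by TLPermissions/TLAtomics/TLHints
// above: each message family occupies a contiguous code range starting at zero, so a single
// "x <= max" compare is enough to classify a param.
object ParamEncodingSketch extends App {
  val toN  = 2 // cap codes: toT = 0, toB = 1, toN = 2
  val NtoN = 5 // report codes: TtoB, TtoN, BtoN, TtoT, BtoB, NtoN
  def isCap(x: Int): Boolean    = x <= toN
  def isReport(x: Int): Boolean = x <= NtoN
  assert(isCap(1) && !isCap(3) && isReport(4) && !isReport(6))
}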
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers 
[min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
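As a concrete illustration of the base/mask convention just described, here is a minimal self-checking sketch. It is not part of Parameters.scala; it assumes rocket-chip's freechips.rocketchip.diplomacy package is on the classpath, and the object name AddressSetExample is purely illustrative.

import freechips.rocketchip.diplomacy.AddressSet

object AddressSetExample extends App {
  // AddressSet(0x200, 0xff): every mask bit is "don't care", so the set covers 0x200-0x2ff.
  val dev0 = AddressSet(0x200, 0xff)
  require(dev0.contains(BigInt(0x2ab)))    // inside the window
  require(!dev0.contains(BigInt(0x300)))   // first byte past the window
  require(dev0.alignment == BigInt(0x100)) // (mask + 1) & ~mask
  require(dev0.contiguous)                 // the mask is a low-order run of ones

  // AddressSet(0x1000, 0xf0f): bits [7:4] must match the base, so the set is
  // sixteen 16-byte fragments: 0x1000-0x100f, 0x1100-0x110f, ..., 0x1f00-0x1f0f.
  val dev1 = AddressSet(0x1000, 0xf0f)
  require(dev1.contains(BigInt(0x110f)))   // second fragment
  require(!dev1.contains(BigInt(0x1010)))  // falls in the gap between fragments
  require(!dev1.contiguous)                // alignment is 16, not mask + 1
}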
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there exist C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there exist D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD =>
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
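Before the generated Verilog, a minimal sketch (not part of Edges.scala) of how a client typically uses the TLEdgeOut constructors and the first/last beat helpers defined above. It assumes the standard rocket-chip diplomacy boilerplate (LazyModule, TLClientNode, TLMasterPortParameters.v1) and that the node is attached to a manager elsewhere in the design; the name ExampleGetClient and its wiring are illustrative only and are not the configuration that produced TLMonitor_16 below.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._

class ExampleGetClient(address: BigInt)(implicit p: Parameters) extends LazyModule {
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(name = "example-get", sourceId = IdRange(0, 1))))))

  lazy val module = new LazyModuleImp(this) {
    val (out, edge) = node.out(0)

    // edge.Get returns (legal, bits): 'legal' says whether some visible manager
    // supports a Get of this size at this address; 'bits' is a fully populated
    // TLBundleA (opcode, mask, etc. are filled in by the edge).
    val (legal, getBits) = edge.Get(fromSource = 0.U, toAddress = address.U, lgSize = 3.U)

    val busy = RegInit(false.B)
    out.a.valid := !busy
    out.a.bits  := getBits
    out.d.ready := true.B

    // Tie off B/C/E; a simple Get client never uses them (harmless if absent).
    out.b.ready := true.B
    out.c.valid := false.B
    out.c.bits  := DontCare
    out.e.valid := false.B
    out.e.bits  := DontCare

    // firstlast uses numBeats1 and a beat counter, as in firstlastHelper above;
    // d_done pulses on the final beat of the (possibly multibeat) response.
    val (d_first, d_last, d_done) = edge.firstlast(out.d)

    when (out.a.fire) { busy := true.B }
    when (d_done)     { busy := false.B }

    assert(!out.a.valid || legal, "no visible manager supports this Get")
  }
}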
module TLMonitor_16( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [28:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire 
_c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 
1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [8:0] c_first_beats1_decode = 9'h0; // @[Edges.scala:220:59] wire [8:0] c_first_beats1 = 9'h0; // @[Edges.scala:221:14] wire [8:0] _c_first_count_T = 9'h0; // @[Edges.scala:234:27] wire [8:0] c_first_count = 9'h0; // @[Edges.scala:234:25] wire [8:0] _c_first_counter_T = 9'h0; // @[Edges.scala:236:21] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_39 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_41 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_45 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_47 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_53 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_59 = 1'h1; // @[Parameters.scala:57:20] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [8:0] c_first_counter1 = 9'h1FF; // @[Edges.scala:230:28] wire [9:0] _c_first_counter1_T = 10'h3FF; // @[Edges.scala:230:28] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] 
_c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [28:0] _c_first_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_first_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_first_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_first_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_set_wo_ready_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_set_wo_ready_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_opcodes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_opcodes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_sizes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_sizes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_opcodes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_opcodes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_sizes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_sizes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_probe_ack_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_probe_ack_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_probe_ack_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_probe_ack_WIRE_3_bits_address = 29'h0; 
// @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire 
[3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] 
_c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57] wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57] wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 
= 16'h10; // @[Monitor.scala:724:51] wire [1027:0] _c_sizes_set_T_1 = 1028'h0; // @[Monitor.scala:768:52] wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79] wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77] wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54] wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59] wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40] wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35] wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35] wire [519:0] c_sizes_set = 520'h0; // @[Monitor.scala:741:34] wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34] wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34] wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34] wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46] wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76] wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117] wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48] wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119] wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] 
_uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] 
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire _source_ok_T_25 = io_in_a_bits_source_0 == 7'h22; // @[Monitor.scala:36:7] wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31] wire _source_ok_T_26 = io_in_a_bits_source_0 == 7'h21; // @[Monitor.scala:36:7] wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31] wire _source_ok_T_27 = io_in_a_bits_source_0 == 7'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_7 = _source_ok_T_27; // @[Parameters.scala:1138:31] wire _source_ok_T_28 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_8 = _source_ok_T_28; // @[Parameters.scala:1138:31] wire _source_ok_T_29 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_30 = _source_ok_T_29 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_31 = _source_ok_T_30 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_32 = _source_ok_T_31 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_33 = _source_ok_T_32 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_34 = _source_ok_T_33 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_35 = _source_ok_T_34 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_35 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46] wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71] wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [26:0] 
_a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [28:0] _is_aligned_T = {17'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 29'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 4'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // 
@[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}] 
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_36 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_36; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_37 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_43 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_49 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_55 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire _source_ok_T_38 = _source_ok_T_37 == 5'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_40 = _source_ok_T_38; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_42 = _source_ok_T_40; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_42; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_44 = _source_ok_T_43 == 5'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_46 = _source_ok_T_44; // @[Parameters.scala:54:{32,67}] wire 
_source_ok_T_48 = _source_ok_T_46; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_48; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_50 = _source_ok_T_49 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_54 = _source_ok_T_52; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_54; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_56 = _source_ok_T_55 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_60 = _source_ok_T_58; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_4 = _source_ok_T_60; // @[Parameters.scala:1138:31] wire _source_ok_T_61 = io_in_d_bits_source_0 == 7'h22; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_5 = _source_ok_T_61; // @[Parameters.scala:1138:31] wire _source_ok_T_62 = io_in_d_bits_source_0 == 7'h21; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_6 = _source_ok_T_62; // @[Parameters.scala:1138:31] wire _source_ok_T_63 = io_in_d_bits_source_0 == 7'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_7 = _source_ok_T_63; // @[Parameters.scala:1138:31] wire _source_ok_T_64 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_8 = _source_ok_T_64; // @[Parameters.scala:1138:31] wire _source_ok_T_65 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_66 = _source_ok_T_65 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_67 = _source_ok_T_66 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_68 = _source_ok_T_67 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_69 = _source_ok_T_68 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_70 = _source_ok_T_69 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_71 = _source_ok_T_70 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_71 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46] wire _T_1548 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1548; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1548; // @[Decoupled.scala:51:35] wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [8:0] a_first_beats1 = a_first_beats1_opdata ? 
a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [8:0] a_first_counter; // @[Edges.scala:229:27] wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [3:0] size; // @[Monitor.scala:389:22] reg [6:0] source; // @[Monitor.scala:390:22] reg [28:0] address; // @[Monitor.scala:391:22] wire _T_1616 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1616; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1616; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1616; // @[Decoupled.scala:51:35] wire [26:0] _GEN_0 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [8:0] d_first_beats1 = d_first_beats1_opdata ? 
d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg [6:0] source_1; // @[Monitor.scala:541:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [64:0] inflight; // @[Monitor.scala:614:27] reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [519:0] inflight_sizes; // @[Monitor.scala:618:33] wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [8:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [8:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [8:0] a_first_counter_1; // @[Edges.scala:229:27] wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [8:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46] wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter_1; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [64:0] a_set; // @[Monitor.scala:626:34] wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [519:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [7:0] a_size_lookup; // @[Monitor.scala:639:33] wire [9:0] _GEN_2 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65] wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65] wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_2; // @[Monitor.scala:641:65, :681:99] wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65, :750:67] wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_2; // @[Monitor.scala:641:65, :791:99] wire [519:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [519:0] _a_size_lookup_T_6 = {512'h0, _a_size_lookup_T_1[7:0]}; // @[Monitor.scala:641:{40,91}] wire [519:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[519:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = 
io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [127:0] _GEN_3 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_3; // @[OneHot.scala:58:35] wire [127:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_3; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1481 = _T_1548 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1481 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1481 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1481 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [9:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1481 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [9:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77] wire [1027:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_1481 ? _a_sizes_set_T_1[519:0] : 520'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [64:0] d_clr; // @[Monitor.scala:664:34] wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [519:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_1527 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1527 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1496 = _T_1616 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_1496 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1496 ? 
_d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [1038:0] _d_sizes_clr_T_5 = 1039'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1496 ? _d_sizes_clr_T_5[519:0] : 520'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [519:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [519:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [519:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [64:0] inflight_1; // @[Monitor.scala:726:35] wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [519:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [519:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46] wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter_2; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T_2 = d_first_2 ? 
d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [7:0] c_size_lookup; // @[Monitor.scala:748:35] wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [519:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [519:0] _c_size_lookup_T_6 = {512'h0, _c_size_lookup_T_1[7:0]}; // @[Monitor.scala:750:{42,93}] wire [519:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[519:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [64:0] d_clr_1; // @[Monitor.scala:774:34] wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [519:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1592 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1592 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1574 = _T_1616 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1574 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1574 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [1038:0] _d_sizes_clr_T_11 = 1039'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1574 ? _d_sizes_clr_T_11[519:0] : 520'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113] wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [519:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [519:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
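The tail of the generated monitor above implements per-source transaction bookkeeping: a one-hot bit of `inflight` is set when an 'A' request fires, cleared when the matching 'D' response completes, and a `watchdog` register counts cycles while requests remain outstanding. The following is a minimal Chisel sketch of that idiom only, not the emitted module; the name `InflightTrackerSketch`, the `sourceBits` parameter, and the simplified single-beat handshake inputs are illustrative assumptions.

import chisel3._
import chisel3.util._

// Minimal sketch (illustrative, not the generated TLMonitor): track which source IDs
// have an outstanding request using a one-hot set/clear bitmap, plus a watchdog counter.
class InflightTrackerSketch(sourceBits: Int = 7) extends Module {
  val io = IO(new Bundle {
    val aFire   = Input(Bool())              // 'A' beat accepted (ready && valid)
    val aSource = Input(UInt(sourceBits.W))  // source ID of the request
    val dFire   = Input(Bool())              // 'D' beat accepted
    val dSource = Input(UInt(sourceBits.W))  // source ID of the response
    val busy    = Output(Bool())
  })

  val nSources = 1 << sourceBits
  val inflight = RegInit(0.U(nSources.W))    // one bit per possibly-outstanding source ID

  // Like a_set / d_clr in the Verilog: one-hot masks gated by the handshakes.
  val aSet = Mux(io.aFire, UIntToOH(io.aSource, nSources), 0.U(nSources.W))
  val dClr = Mux(io.dFire, UIntToOH(io.dSource, nSources), 0.U(nSources.W))
  inflight := (inflight | aSet) & ~dClr

  // A response must match an outstanding request (same-cycle request/response allowed).
  assert(!io.dFire || (inflight | aSet)(io.dSource), "'D' response with no matching request")

  // Watchdog: counts while anything is outstanding, resets whenever a response makes progress.
  val watchdog = RegInit(0.U(32.W))
  watchdog := Mux(io.dFire || inflight === 0.U, 0.U, watchdog + 1.U)

  io.busy := inflight.orR
}

Expressing the bookkeeping as one-hot OR/AND-NOT updates is what lets the generated code apply a set and a clear for different source IDs in the same cycle without any priority logic.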
Generate the Verilog code corresponding to the following Chisel files.
File ClockDomain.scala:
package freechips.rocketchip.prci

import chisel3._

import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._

abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing {
  def clockBundle: ClockBundle

  lazy val module = new Impl
  class Impl extends LazyRawModuleImp(this) {
    childClock := clockBundle.clock
    childReset := clockBundle.reset
    override def provideImplicitClockToLazyChildren = true

    // these are just for backwards compatibility with external devices
    // that were manually wiring themselves to the domain's clock/reset input:
    val clock = IO(Output(chiselTypeOf(clockBundle.clock)))
    val reset = IO(Output(chiselTypeOf(clockBundle.reset)))
    clock := clockBundle.clock
    reset := clockBundle.reset
  }
}

abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing

class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain {
  def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) =
    this(ClockSinkParameters(take = take, name = name))

  val clockNode = ClockSinkNode(Seq(clockSinkParams))
  def clockBundle = clockNode.in.head._1
  override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString
}

class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain {
  def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) =
    this(ClockSourceParameters(give = give, name = name))

  val clockNode = ClockSourceNode(Seq(clockSourceParams))
  def clockBundle = clockNode.out.head._1
  override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString
}

abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule

import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle

import scala.collection.immutable.SortedMap

/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
  *
  * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
  */
sealed trait LazyModuleImpLike extends RawModule {

  /** [[LazyModule]] that contains this instance. */
  val wrapper: LazyModule

  /** IOs that will be automatically "punched" for this instance. */
  val auto: AutoBundle

  /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
  protected[diplomacy] val dangles: Seq[Dangle]

  // [[wrapper.module]] had better not be accessed while LazyModules are still being built!
  require(
    LazyModule.scope.isEmpty,
    s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
  )

  /** Set module name. Defaults to the containing LazyModule's desiredName. */
  override def desiredName: String = wrapper.desiredName

  suggestName(wrapper.suggestedName)

  /** [[Parameters]] for chisel [[Module]]s. */
  implicit val p: Parameters = wrapper.p

  /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
    * submodules.
    */
  protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
    // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
    // 2. return [[Dangle]]s from each module.
    val childDangles = wrapper.children.reverse.flatMap { c =>
      implicit val sourceInfo: SourceInfo = c.info
      c.cloneProto.map { cp =>
        // If the child is a clone, then recursively set cloneProto of its children as well
        def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
          require(bases.size == clones.size)
          (bases.zip(clones)).map { case (l, r) =>
            require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
            l.cloneProto = Some(r)
            assignCloneProtos(l.children, r.children)
          }
        }
        assignCloneProtos(c.children, cp.children)
        // Clone the child module as a record, and get its [[AutoBundle]]
        val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
        val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
        // Get the empty [[Dangle]]'s of the cloned child
        val rawDangles = c.cloneDangles()
        require(rawDangles.size == clonedAuto.elements.size)
        // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
        val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
        dangles
      }.getOrElse {
        // For non-clones, instantiate the child module
        val mod = try {
          Module(c.module)
        } catch {
          case e: ChiselException => {
            println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
            throw e
          }
        }
        mod.dangles
      }
    }

    // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
    // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
    val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())

    // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
    val allDangles = nodeDangles ++ childDangles

    // Group [[allDangles]] by their [[source]].
    val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)

    // For each [[source]] set of [[Dangle]]s of size 2, ensure that these
    // can be connected as a source-sink pair (have opposite flipped value).
    // Make the connection and mark them as [[done]].
    val done = Set() ++ pairing.values.filter(_.size == 2).map {
      case Seq(a, b) =>
        require(a.flipped != b.flipped)
        // @todo <> in chisel3 makes directionless connection.
        if (a.flipped) { a.data <> b.data }
        else { b.data <> a.data }
        a.source
      case _ => None
    }

    // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
    val forward = allDangles.filter(d => !done(d.source))

    // Generate [[AutoBundle]] IO from [[forward]].
    val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))

    // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
    val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
      if (d.flipped) { d.data <> io }
      else { io <> d.data }
      d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
    }

    // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
    wrapper.inModuleBody.reverse.foreach { _() }

    if (wrapper.shouldBeInlined) {
      chisel3.experimental.annotate(new ChiselAnnotation {
        def toFirrtl = InlineAnnotation(toNamed)
      })
    }

    // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
    (auto, dangles)
  }
}

/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
  *
  * @param wrapper
  *   the [[LazyModule]] from which the `.module` call is being made.
  */
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {

  /** Instantiate hardware of this `Module`. */
  val (auto, dangles) = instantiate()
}

/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
  *
  * @param wrapper
  *   the [[LazyModule]] from which the `.module` call is being made.
  */
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
  // These wires are the default clock+reset for all LazyModule children.
  // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
  // [[LazyRawModuleImp]] children.
  // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.

  /** drive clock explicitly. */
  val childClock: Clock = Wire(Clock())

  /** drive reset explicitly. */
  val childReset: Reset = Wire(Reset())
  // the default is that these are disabled
  childClock := false.B.asClock
  childReset := chisel3.DontCare

  def provideImplicitClockToLazyChildren: Boolean = false
  val (auto, dangles) =
    if (provideImplicitClockToLazyChildren) {
      withClockAndReset(childClock, childReset) { instantiate() }
    } else {
      instantiate()
    }
}
File NoC.scala:
package constellation.noc

import chisel3._
import chisel3.util._

import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp, BundleBridgeSink, InModuleBody}
import freechips.rocketchip.util.ElaborationArtefacts
import freechips.rocketchip.prci._
import constellation.router._
import constellation.channel._
import constellation.routing.{RoutingRelation, ChannelRoutingInfo}
import constellation.topology.{PhysicalTopology, UnidirectionalLine}

class NoCTerminalIO(
  val ingressParams: Seq[IngressChannelParams],
  val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle {
  val ingress = MixedVec(ingressParams.map { u => Flipped(new IngressChannel(u)) })
  val egress = MixedVec(egressParams.map { u => new EgressChannel(u) })
}

class NoC(nocParams: NoCParams)(implicit p: Parameters) extends LazyModule {
  override def shouldBeInlined = nocParams.inlineNoC
  val internalParams = InternalNoCParams(nocParams)
  val allChannelParams = internalParams.channelParams
  val allIngressParams = internalParams.ingressParams
  val allEgressParams = internalParams.egressParams
  val allRouterParams = internalParams.routerParams

  val iP = p.alterPartial({ case InternalNoCKey => internalParams })
  val nNodes = nocParams.topology.nNodes
  val nocName = nocParams.nocName
  val skipValidationChecks = nocParams.skipValidationChecks

  val clockSourceNodes = Seq.tabulate(nNodes) { i => ClockSourceNode(Seq(ClockSourceParameters())) }
  val router_sink_domains = Seq.tabulate(nNodes) { i =>
    val router_sink_domain = LazyModule(new ClockSinkDomain(ClockSinkParameters(
      name = Some(s"${nocName}_router_$i")
    )))
    router_sink_domain.clockNode := clockSourceNodes(i)
    router_sink_domain
  }

  val routers = Seq.tabulate(nNodes) { i => router_sink_domains(i) {
    val inParams = allChannelParams.filter(_.destId == i).map(
      _.copy(payloadBits=allRouterParams(i).user.payloadBits)
    )
    val outParams = allChannelParams.filter(_.srcId == i).map(
      _.copy(payloadBits=allRouterParams(i).user.payloadBits)
    )
    val ingressParams = allIngressParams.filter(_.destId == i).map(
      _.copy(payloadBits=allRouterParams(i).user.payloadBits)
    )
    val egressParams = allEgressParams.filter(_.srcId ==
i).map( _.copy(payloadBits=allRouterParams(i).user.payloadBits) ) val noIn = inParams.size + ingressParams.size == 0 val noOut = outParams.size + egressParams.size == 0 if (noIn || noOut) { println(s"Constellation WARNING: $nocName router $i seems to be unused, it will not be generated") None } else { Some(LazyModule(new Router( routerParams = allRouterParams(i), preDiplomaticInParams = inParams, preDiplomaticIngressParams = ingressParams, outDests = outParams.map(_.destId), egressIds = egressParams.map(_.egressId) )(iP))) } }}.flatten val ingressNodes = allIngressParams.map { u => IngressChannelSourceNode(u.destId) } val egressNodes = allEgressParams.map { u => EgressChannelDestNode(u) } // Generate channels between routers diplomatically Seq.tabulate(nNodes, nNodes) { case (i, j) => if (i != j) { val routerI = routers.find(_.nodeId == i) val routerJ = routers.find(_.nodeId == j) if (routerI.isDefined && routerJ.isDefined) { val sourceNodes: Seq[ChannelSourceNode] = routerI.get.sourceNodes.filter(_.destId == j) val destNodes: Seq[ChannelDestNode] = routerJ.get.destNodes.filter(_.destParams.srcId == i) require (sourceNodes.size == destNodes.size) (sourceNodes zip destNodes).foreach { case (src, dst) => val channelParam = allChannelParams.find(c => c.srcId == i && c.destId == j).get router_sink_domains(j) { implicit val p: Parameters = iP (dst := ChannelWidthWidget(routerJ.get.payloadBits, routerI.get.payloadBits) := channelParam.channelGen(p)(src) ) } } } }} // Generate terminal channels diplomatically routers.foreach { dst => router_sink_domains(dst.nodeId) { implicit val p: Parameters = iP dst.ingressNodes.foreach(n => { val ingressId = n.destParams.ingressId require(dst.payloadBits <= allIngressParams(ingressId).payloadBits) (n := IngressWidthWidget(dst.payloadBits, allIngressParams(ingressId).payloadBits) := ingressNodes(ingressId) ) }) dst.egressNodes.foreach(n => { val egressId = n.egressId require(dst.payloadBits <= allEgressParams(egressId).payloadBits) (egressNodes(egressId) := EgressWidthWidget(allEgressParams(egressId).payloadBits, dst.payloadBits) := n ) }) }} val debugNodes = routers.map { r => val sink = BundleBridgeSink[DebugBundle]() sink := r.debugNode sink } val ctrlNodes = if (nocParams.hasCtrl) { (0 until nNodes).map { i => routers.find(_.nodeId == i).map { r => val sink = BundleBridgeSink[RouterCtrlBundle]() sink := r.ctrlNode.get sink } } } else { Nil } println(s"Constellation: $nocName Finished parameter validation") lazy val module = new Impl class Impl extends LazyModuleImp(this) { println(s"Constellation: $nocName Starting NoC RTL generation") val io = IO(new NoCTerminalIO(allIngressParams, allEgressParams)(iP) { val router_clocks = Vec(nNodes, Input(new ClockBundle(ClockBundleParameters()))) val router_ctrl = if (nocParams.hasCtrl) Vec(nNodes, new RouterCtrlBundle) else Nil }) (io.ingress zip ingressNodes.map(_.out(0)._1)).foreach { case (l,r) => r <> l } (io.egress zip egressNodes .map(_.in (0)._1)).foreach { case (l,r) => l <> r } (io.router_clocks zip clockSourceNodes.map(_.out(0)._1)).foreach { case (l,r) => l <> r } if (nocParams.hasCtrl) { ctrlNodes.zipWithIndex.map { case (c,i) => if (c.isDefined) { io.router_ctrl(i) <> c.get.in(0)._1 } else { io.router_ctrl(i) <> DontCare } } } // TODO: These assume a single clock-domain across the entire noc val debug_va_stall_ctr = RegInit(0.U(64.W)) val debug_sa_stall_ctr = RegInit(0.U(64.W)) val debug_any_stall_ctr = debug_va_stall_ctr + debug_sa_stall_ctr debug_va_stall_ctr := debug_va_stall_ctr + 
debugNodes.map(_.in(0)._1.va_stall.reduce(_+_)).reduce(_+_) debug_sa_stall_ctr := debug_sa_stall_ctr + debugNodes.map(_.in(0)._1.sa_stall.reduce(_+_)).reduce(_+_) dontTouch(debug_va_stall_ctr) dontTouch(debug_sa_stall_ctr) dontTouch(debug_any_stall_ctr) def prepend(s: String) = Seq(nocName, s).mkString(".") ElaborationArtefacts.add(prepend("noc.graphml"), graphML) val adjList = routers.map { r => val outs = r.outParams.map(o => s"${o.destId}").mkString(" ") val egresses = r.egressParams.map(e => s"e${e.egressId}").mkString(" ") val ingresses = r.ingressParams.map(i => s"i${i.ingressId} ${r.nodeId}") (Seq(s"${r.nodeId} $outs $egresses") ++ ingresses).mkString("\n") }.mkString("\n") ElaborationArtefacts.add(prepend("noc.adjlist"), adjList) val xys = routers.map(r => { val n = r.nodeId val ids = (Seq(r.nodeId.toString) ++ r.egressParams.map(e => s"e${e.egressId}") ++ r.ingressParams.map(i => s"i${i.ingressId}") ) val plotter = nocParams.topology.plotter val coords = (Seq(plotter.node(r.nodeId)) ++ Seq.tabulate(r.egressParams.size ) { i => plotter. egress(i, r. egressParams.size, r.nodeId) } ++ Seq.tabulate(r.ingressParams.size) { i => plotter.ingress(i, r.ingressParams.size, r.nodeId) } ) (ids zip coords).map { case (i, (x, y)) => s"$i $x $y" }.mkString("\n") }).mkString("\n") ElaborationArtefacts.add(prepend("noc.xy"), xys) val edgeProps = routers.map { r => val outs = r.outParams.map { o => (Seq(s"${r.nodeId} ${o.destId}") ++ (if (o.possibleFlows.size == 0) Some("unused") else None)) .mkString(" ") } val egresses = r.egressParams.map { e => (Seq(s"${r.nodeId} e${e.egressId}") ++ (if (e.possibleFlows.size == 0) Some("unused") else None)) .mkString(" ") } val ingresses = r.ingressParams.map { i => (Seq(s"i${i.ingressId} ${r.nodeId}") ++ (if (i.possibleFlows.size == 0) Some("unused") else None)) .mkString(" ") } (outs ++ egresses ++ ingresses).mkString("\n") }.mkString("\n") ElaborationArtefacts.add(prepend("noc.edgeprops"), edgeProps) println(s"Constellation: $nocName Finished NoC RTL generation") } }
module TLNoC_1_router_4ClockSinkDomain( // @[ClockDomain.scala:14:9] output [2:0] auto_routers_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25] output [2:0] auto_routers_debug_out_va_stall_2, // @[LazyModuleImp.scala:107:25] output [2:0] auto_routers_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25] output [2:0] auto_routers_debug_out_sa_stall_2, // @[LazyModuleImp.scala:107:25] input auto_routers_egress_nodes_out_2_flit_ready, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_2_flit_valid, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_2_flit_bits_head, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_2_flit_bits_tail, // @[LazyModuleImp.scala:107:25] input auto_routers_egress_nodes_out_1_flit_ready, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_1_flit_valid, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_1_flit_bits_head, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25] input auto_routers_egress_nodes_out_0_flit_ready, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_0_flit_valid, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_0_flit_bits_head, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25] output [72:0] auto_routers_egress_nodes_out_0_flit_bits_payload, // @[LazyModuleImp.scala:107:25] output auto_routers_ingress_nodes_in_1_flit_ready, // @[LazyModuleImp.scala:107:25] input auto_routers_ingress_nodes_in_1_flit_valid, // @[LazyModuleImp.scala:107:25] input auto_routers_ingress_nodes_in_1_flit_bits_head, // @[LazyModuleImp.scala:107:25] input auto_routers_ingress_nodes_in_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25] input [72:0] auto_routers_ingress_nodes_in_1_flit_bits_payload, // @[LazyModuleImp.scala:107:25] output auto_routers_source_nodes_out_flit_0_valid, // @[LazyModuleImp.scala:107:25] output auto_routers_source_nodes_out_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] output auto_routers_source_nodes_out_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] output [72:0] auto_routers_source_nodes_out_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] output [2:0] auto_routers_source_nodes_out_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] output [4:0] auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] output [4:0] auto_routers_source_nodes_out_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_source_nodes_out_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] output [2:0] auto_routers_source_nodes_out_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] input [4:0] auto_routers_source_nodes_out_credit_return, // @[LazyModuleImp.scala:107:25] input [4:0] auto_routers_source_nodes_out_vc_free, // @[LazyModuleImp.scala:107:25] input auto_routers_dest_nodes_in_flit_0_valid, // @[LazyModuleImp.scala:107:25] input auto_routers_dest_nodes_in_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] input auto_routers_dest_nodes_in_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] input [72:0] auto_routers_dest_nodes_in_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] input [2:0] auto_routers_dest_nodes_in_flit_0_bits_flow_vnet_id, // 
@[LazyModuleImp.scala:107:25] input [4:0] auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] input [1:0] auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] input [4:0] auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] input [1:0] auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] input [2:0] auto_routers_dest_nodes_in_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] output [4:0] auto_routers_dest_nodes_in_credit_return, // @[LazyModuleImp.scala:107:25] output [4:0] auto_routers_dest_nodes_in_vc_free, // @[LazyModuleImp.scala:107:25] input auto_clock_in_clock, // @[LazyModuleImp.scala:107:25] input auto_clock_in_reset // @[LazyModuleImp.scala:107:25] ); Router_33 routers ( // @[NoC.scala:67:22] .clock (auto_clock_in_clock), .reset (auto_clock_in_reset), .auto_debug_out_va_stall_0 (auto_routers_debug_out_va_stall_0), .auto_debug_out_va_stall_2 (auto_routers_debug_out_va_stall_2), .auto_debug_out_sa_stall_0 (auto_routers_debug_out_sa_stall_0), .auto_debug_out_sa_stall_2 (auto_routers_debug_out_sa_stall_2), .auto_egress_nodes_out_2_flit_ready (auto_routers_egress_nodes_out_2_flit_ready), .auto_egress_nodes_out_2_flit_valid (auto_routers_egress_nodes_out_2_flit_valid), .auto_egress_nodes_out_2_flit_bits_head (auto_routers_egress_nodes_out_2_flit_bits_head), .auto_egress_nodes_out_2_flit_bits_tail (auto_routers_egress_nodes_out_2_flit_bits_tail), .auto_egress_nodes_out_1_flit_ready (auto_routers_egress_nodes_out_1_flit_ready), .auto_egress_nodes_out_1_flit_valid (auto_routers_egress_nodes_out_1_flit_valid), .auto_egress_nodes_out_1_flit_bits_head (auto_routers_egress_nodes_out_1_flit_bits_head), .auto_egress_nodes_out_1_flit_bits_tail (auto_routers_egress_nodes_out_1_flit_bits_tail), .auto_egress_nodes_out_0_flit_ready (auto_routers_egress_nodes_out_0_flit_ready), .auto_egress_nodes_out_0_flit_valid (auto_routers_egress_nodes_out_0_flit_valid), .auto_egress_nodes_out_0_flit_bits_head (auto_routers_egress_nodes_out_0_flit_bits_head), .auto_egress_nodes_out_0_flit_bits_tail (auto_routers_egress_nodes_out_0_flit_bits_tail), .auto_egress_nodes_out_0_flit_bits_payload (auto_routers_egress_nodes_out_0_flit_bits_payload), .auto_ingress_nodes_in_1_flit_ready (auto_routers_ingress_nodes_in_1_flit_ready), .auto_ingress_nodes_in_1_flit_valid (auto_routers_ingress_nodes_in_1_flit_valid), .auto_ingress_nodes_in_1_flit_bits_head (auto_routers_ingress_nodes_in_1_flit_bits_head), .auto_ingress_nodes_in_1_flit_bits_tail (auto_routers_ingress_nodes_in_1_flit_bits_tail), .auto_ingress_nodes_in_1_flit_bits_payload (auto_routers_ingress_nodes_in_1_flit_bits_payload), .auto_source_nodes_out_flit_0_valid (auto_routers_source_nodes_out_flit_0_valid), .auto_source_nodes_out_flit_0_bits_head (auto_routers_source_nodes_out_flit_0_bits_head), .auto_source_nodes_out_flit_0_bits_tail (auto_routers_source_nodes_out_flit_0_bits_tail), .auto_source_nodes_out_flit_0_bits_payload (auto_routers_source_nodes_out_flit_0_bits_payload), .auto_source_nodes_out_flit_0_bits_flow_vnet_id (auto_routers_source_nodes_out_flit_0_bits_flow_vnet_id), .auto_source_nodes_out_flit_0_bits_flow_ingress_node (auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node), .auto_source_nodes_out_flit_0_bits_flow_ingress_node_id (auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node_id), .auto_source_nodes_out_flit_0_bits_flow_egress_node 
(auto_routers_source_nodes_out_flit_0_bits_flow_egress_node), .auto_source_nodes_out_flit_0_bits_flow_egress_node_id (auto_routers_source_nodes_out_flit_0_bits_flow_egress_node_id), .auto_source_nodes_out_flit_0_bits_virt_channel_id (auto_routers_source_nodes_out_flit_0_bits_virt_channel_id), .auto_source_nodes_out_credit_return (auto_routers_source_nodes_out_credit_return), .auto_source_nodes_out_vc_free (auto_routers_source_nodes_out_vc_free), .auto_dest_nodes_in_flit_0_valid (auto_routers_dest_nodes_in_flit_0_valid), .auto_dest_nodes_in_flit_0_bits_head (auto_routers_dest_nodes_in_flit_0_bits_head), .auto_dest_nodes_in_flit_0_bits_tail (auto_routers_dest_nodes_in_flit_0_bits_tail), .auto_dest_nodes_in_flit_0_bits_payload (auto_routers_dest_nodes_in_flit_0_bits_payload), .auto_dest_nodes_in_flit_0_bits_flow_vnet_id (auto_routers_dest_nodes_in_flit_0_bits_flow_vnet_id), .auto_dest_nodes_in_flit_0_bits_flow_ingress_node (auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node), .auto_dest_nodes_in_flit_0_bits_flow_ingress_node_id (auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node_id), .auto_dest_nodes_in_flit_0_bits_flow_egress_node (auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node), .auto_dest_nodes_in_flit_0_bits_flow_egress_node_id (auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node_id), .auto_dest_nodes_in_flit_0_bits_virt_channel_id (auto_routers_dest_nodes_in_flit_0_bits_virt_channel_id), .auto_dest_nodes_in_credit_return (auto_routers_dest_nodes_in_credit_return), .auto_dest_nodes_in_vc_free (auto_routers_dest_nodes_in_vc_free) ); // @[NoC.scala:67:22] endmodule
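The module above is a thin per-router clock-domain wrapper: because LazyRawModuleImp drives childClock and childReset from the domain's clockBundle, the generated Verilog only forwards auto_clock_in_clock and auto_clock_in_reset into the Router_33 instance. A hedged sketch of the same wrapping pattern, modeled on the router_sink_domains code in NoC.scala above, follows; DummyBlock and WrappedBlock are illustrative names, not part of rocket-chip or Constellation.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.prci._

// A trivial lazy child standing in for a real block such as a router.
class DummyBlock(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyModuleImp(this) {
    val count = RegInit(0.U(8.W))
    count := count + 1.U
  }
}

// Wrap the child in its own ClockSinkDomain and feed that domain from a
// ClockSourceNode, mirroring router_sink_domains(i) in NoC.scala.
class WrappedBlock(implicit p: Parameters) extends LazyModule {
  val clockSource = ClockSourceNode(Seq(ClockSourceParameters()))
  val domain = LazyModule(new ClockSinkDomain(ClockSinkParameters(name = Some("dummy"))))
  domain.clockNode := clockSource
  val block = domain { LazyModule(new DummyBlock) } // elaborated under the domain's clock/reset
  lazy val module = new LazyModuleImp(this) { }
}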
Generate the Verilog code corresponding to the following Chisel files. File ShiftRegisterPriorityQueue.scala: package compressacc import chisel3._ import chisel3.util._ import chisel3.util._ // TODO : support enq & deq at the same cycle class PriorityQueueStageIO(keyWidth: Int, value: ValueInfo) extends Bundle { val output_prev = KeyValue(keyWidth, value) val output_nxt = KeyValue(keyWidth, value) val input_prev = Flipped(KeyValue(keyWidth, value)) val input_nxt = Flipped(KeyValue(keyWidth, value)) val cmd = Flipped(Valid(UInt(1.W))) val insert_here = Input(Bool()) val cur_input_keyval = Flipped(KeyValue(keyWidth, value)) val cur_output_keyval = KeyValue(keyWidth, value) } class PriorityQueueStage(keyWidth: Int, value: ValueInfo) extends Module { val io = IO(new PriorityQueueStageIO(keyWidth, value)) dontTouch(io) val CMD_DEQ = 0.U val CMD_ENQ = 1.U val MAX_VALUE = (1 << keyWidth) - 1 val key_reg = RegInit(MAX_VALUE.U(keyWidth.W)) val value_reg = Reg(value) io.output_prev.key := key_reg io.output_prev.value := value_reg io.output_nxt.key := key_reg io.output_nxt.value := value_reg io.cur_output_keyval.key := key_reg io.cur_output_keyval.value := value_reg when (io.cmd.valid) { switch (io.cmd.bits) { is (CMD_DEQ) { key_reg := io.input_nxt.key value_reg := io.input_nxt.value } is (CMD_ENQ) { when (io.insert_here) { key_reg := io.cur_input_keyval.key value_reg := io.cur_input_keyval.value } .elsewhen (key_reg >= io.cur_input_keyval.key) { key_reg := io.input_prev.key value_reg := io.input_prev.value } .otherwise { // do nothing } } } } } object PriorityQueueStage { def apply(keyWidth: Int, v: ValueInfo): PriorityQueueStage = new PriorityQueueStage(keyWidth, v) } // TODO // - This design is not scalable as the enqued_keyval is broadcasted to all the stages // - Add pipeline registers later class PriorityQueueIO(queSize: Int, keyWidth: Int, value: ValueInfo) extends Bundle { val cnt_bits = log2Ceil(queSize+1) val counter = Output(UInt(cnt_bits.W)) val enq = Flipped(Decoupled(KeyValue(keyWidth, value))) val deq = Decoupled(KeyValue(keyWidth, value)) } class PriorityQueue(queSize: Int, keyWidth: Int, value: ValueInfo) extends Module { val keyWidthInternal = keyWidth + 1 val CMD_DEQ = 0.U val CMD_ENQ = 1.U val io = IO(new PriorityQueueIO(queSize, keyWidthInternal, value)) dontTouch(io) val MAX_VALUE = ((1 << keyWidthInternal) - 1).U val cnt_bits = log2Ceil(queSize+1) // do not consider cases where we are inserting more entries then the queSize val counter = RegInit(0.U(cnt_bits.W)) io.counter := counter val full = (counter === queSize.U) val empty = (counter === 0.U) io.deq.valid := !empty io.enq.ready := !full when (io.enq.fire) { counter := counter + 1.U } when (io.deq.fire) { counter := counter - 1.U } val cmd_valid = io.enq.valid || io.deq.ready val cmd = Mux(io.enq.valid, CMD_ENQ, CMD_DEQ) assert(!(io.enq.valid && io.deq.ready)) val stages = Seq.fill(queSize)(Module(new PriorityQueueStage(keyWidthInternal, value))) for (i <- 0 until (queSize - 1)) { stages(i+1).io.input_prev <> stages(i).io.output_nxt stages(i).io.input_nxt <> stages(i+1).io.output_prev } stages(queSize-1).io.input_nxt.key := MAX_VALUE // stages(queSize-1).io.input_nxt.value := stages(queSize-1).io.input_nxt.value.symbol := 0.U // stages(queSize-1).io.input_nxt.value.child(0) := 0.U // stages(queSize-1).io.input_nxt.value.child(1) := 0.U stages(0).io.input_prev.key := io.enq.bits.key stages(0).io.input_prev.value <> io.enq.bits.value for (i <- 0 until queSize) { stages(i).io.cmd.valid := cmd_valid stages(i).io.cmd.bits := cmd 
stages(i).io.cur_input_keyval <> io.enq.bits } val is_large_or_equal = WireInit(VecInit(Seq.fill(queSize)(false.B))) for (i <- 0 until queSize) { is_large_or_equal(i) := (stages(i).io.cur_output_keyval.key >= io.enq.bits.key) } val is_large_or_equal_cat = Wire(UInt(queSize.W)) is_large_or_equal_cat := Cat(is_large_or_equal.reverse) val insert_here_idx = PriorityEncoder(is_large_or_equal_cat) for (i <- 0 until queSize) { when (i.U === insert_here_idx) { stages(i).io.insert_here := true.B } .otherwise { stages(i).io.insert_here := false.B } } io.deq.bits <> stages(0).io.output_prev }
module PriorityQueueStage_224( // @[ShiftRegisterPriorityQueue.scala:21:7] input clock, // @[ShiftRegisterPriorityQueue.scala:21:7] input reset, // @[ShiftRegisterPriorityQueue.scala:21:7] output [30:0] io_output_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_output_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] output [30:0] io_output_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_output_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_input_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_input_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_input_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_input_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_cmd_valid, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_cmd_bits, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_insert_here, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_cur_input_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_cur_input_keyval_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] output [30:0] io_cur_output_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_cur_output_keyval_value_symbol // @[ShiftRegisterPriorityQueue.scala:22:14] ); wire [30:0] io_input_prev_key_0 = io_input_prev_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_input_prev_value_symbol_0 = io_input_prev_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_input_nxt_key_0 = io_input_nxt_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_input_nxt_value_symbol_0 = io_input_nxt_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_cmd_valid_0 = io_cmd_valid; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_cmd_bits_0 = io_cmd_bits; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_insert_here_0 = io_insert_here; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_cur_input_keyval_key_0 = io_cur_input_keyval_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_cur_input_keyval_value_symbol_0 = io_cur_input_keyval_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] reg [30:0] key_reg; // @[ShiftRegisterPriorityQueue.scala:30:24] assign io_output_prev_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] assign io_output_nxt_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] assign io_cur_output_keyval_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] reg [9:0] value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:31:22] assign io_output_prev_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] assign io_output_nxt_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] assign io_cur_output_keyval_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] wire _T_2 = 
key_reg >= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24, :52:30] always @(posedge clock) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (reset) // @[ShiftRegisterPriorityQueue.scala:21:7] key_reg <= 31'h7FFFFFFF; // @[ShiftRegisterPriorityQueue.scala:30:24] else if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7] key_reg <= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30] key_reg <= io_input_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] end else // @[ShiftRegisterPriorityQueue.scala:21:7] key_reg <= io_input_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] end if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7] value_reg_symbol <= io_cur_input_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30] value_reg_symbol <= io_input_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] end else // @[ShiftRegisterPriorityQueue.scala:21:7] value_reg_symbol <= io_input_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] end end // always @(posedge) assign io_output_prev_key = io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_output_prev_value_symbol = io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_output_nxt_key = io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_output_nxt_value_symbol = io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_cur_output_keyval_key = io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_cur_output_keyval_value_symbol = io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] endmodule
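The heart of the shift-register priority queue above is the insertion-slot search: every stage compares its stored key against the key being enqueued, and a PriorityEncoder over those comparison bits selects the first stage whose key is greater than or equal, which is where insert_here is asserted while later stages shift. A small self-contained sketch of just that selection logic follows; InsertSlotFinder and its port names are illustrative, not part of compressacc.

import chisel3._
import chisel3.util._

// Hedged sketch of the insert_here_idx computation in PriorityQueue: bit i of
// geq means stage i's stored key is >= the new key, and the lowest such stage
// is the one that should capture the new entry.
class InsertSlotFinder(n: Int = 8, keyWidth: Int = 31) extends Module {
  val io = IO(new Bundle {
    val storedKeys = Input(Vec(n, UInt(keyWidth.W)))
    val newKey     = Input(UInt(keyWidth.W))
    val insertHere = Output(Vec(n, Bool()))
  })
  val geq = Cat(io.storedKeys.map(_ >= io.newKey).reverse) // bit i corresponds to stage i
  val idx = PriorityEncoder(geq)
  io.insertHere := VecInit(Seq.tabulate(n)(i => i.U === idx))
}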
Generate the Verilog code corresponding to the following Chisel files. File InputUnit.scala: package constellation.router import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.util._ import constellation.channel._ import constellation.routing.{FlowRoutingBundle} import constellation.noc.{HasNoCParams} class AbstractInputUnitIO( val cParam: BaseChannelParams, val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams], )(implicit val p: Parameters) extends Bundle with HasRouterOutputParams { val nodeId = cParam.destId val router_req = Decoupled(new RouteComputerReq) val router_resp = Input(new RouteComputerResp(outParams, egressParams)) val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams)) val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams)) val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })) val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams))) val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams))) val debug = Output(new Bundle { val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W) val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W) }) val block = Input(Bool()) } abstract class AbstractInputUnit( val cParam: BaseChannelParams, val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams] )(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams { val nodeId = cParam.destId def io: AbstractInputUnitIO } class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module { val nVirtualChannels = cParam.nVirtualChannels val io = IO(new Bundle { val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits)))) val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits))) }) val useOutputQueues = cParam.useOutputQueues val delims = if (useOutputQueues) { cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_) } else { // If no queuing, have to add an additional slot since head == tail implies empty // TODO this should be fixed, should use all slots available cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_) } val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) => if (cParam.virtualChannelParams(i).traversable) s else 0 } val ends = delims.tail.zipWithIndex.map { case (s,i) => if (cParam.virtualChannelParams(i).traversable) s else 0 } val fullSize = delims.last // Ugly case. 
Use multiple queues if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) { require(useOutputQueues) val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize))) qs.zipWithIndex.foreach { case (q,i) => val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U) q.io.enq.valid := sel.orR q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head)) q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail)) q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload)) io.deq(i) <> q.io.deq } } else { val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits)) val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W)))) val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W)))) val empty = (heads zip tails).map(t => t._1 === t._2) val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) } qs.foreach(_.io.enq.valid := false.B) qs.foreach(_.io.enq.bits := DontCare) val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id) val flit = Wire(new BaseFlit(cParam.payloadBits)) val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B flit.head := io.enq(0).bits.head flit.tail := io.enq(0).bits.tail flit.payload := io.enq(0).bits.payload when (io.enq(0).valid && !direct_to_q) { val tail = tails(io.enq(0).bits.virt_channel_id) mem.write(tail, flit) tails(io.enq(0).bits.virt_channel_id) := Mux( tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(vc_sel, starts.map(_.U)), tail + 1.U) } .elsewhen (io.enq(0).valid && direct_to_q) { for (i <- 0 until nVirtualChannels) { when (io.enq(0).bits.virt_channel_id === i.U) { qs(i).io.enq.valid := true.B qs(i).io.enq.bits := flit } } } if (useOutputQueues) { val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready } val to_q_oh = PriorityEncoderOH(can_to_q) val to_q = OHToUInt(to_q_oh) when (can_to_q.orR) { val head = Mux1H(to_q_oh, heads) heads(to_q) := Mux( head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(to_q_oh, starts.map(_.U)), head + 1.U) for (i <- 0 until nVirtualChannels) { when (to_q_oh(i)) { qs(i).io.enq.valid := true.B qs(i).io.enq.bits := mem.read(head) } } } for (i <- 0 until nVirtualChannels) { io.deq(i) <> qs(i).io.deq } } else { qs.map(_.io.deq.ready := false.B) val ready_sel = io.deq.map(_.ready) val fire = io.deq.map(_.fire) assert(PopCount(fire) <= 1.U) val head = Mux1H(fire, heads) when (fire.orR) { val fire_idx = OHToUInt(fire) heads(fire_idx) := Mux( head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(fire, starts.map(_.U)), head + 1.U) } val read_flit = mem.read(head) for (i <- 0 until nVirtualChannels) { io.deq(i).valid := !empty(i) io.deq(i).bits := read_flit } } } } class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams], egressParams: Seq[EgressChannelParams], combineRCVA: Boolean, combineSAST: Boolean ) (implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) { val nVirtualChannels = cParam.nVirtualChannels val virtualChannelParams = cParam.virtualChannelParams class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) { val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams])) } val io = IO(new InputUnitIO) val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5) class InputState extends Bundle { val g = UInt(3.W) val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) 
}) val flow = new FlowRoutingBundle val fifo_deps = UInt(nVirtualChannels.W) } val input_buffer = Module(new InputBuffer(cParam)) for (i <- 0 until cParam.srcSpeedup) { input_buffer.io.enq(i) := io.in.flit(i) } input_buffer.io.deq.foreach(_.ready := false.B) val route_arbiter = Module(new Arbiter( new RouteComputerReq, nVirtualChannels )) io.router_req <> route_arbiter.io.out val states = Reg(Vec(nVirtualChannels, new InputState)) val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_) val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_) if (anyFifo) { val idle_mask = VecInit(states.map(_.g === g_i)).asUInt for (s <- states) for (i <- 0 until nVirtualChannels) s.fifo_deps := s.fifo_deps & ~idle_mask } for (i <- 0 until cParam.srcSpeedup) { when (io.in.flit(i).fire && io.in.flit(i).bits.head) { val id = io.in.flit(i).bits.virt_channel_id assert(id < nVirtualChannels.U) assert(states(id).g === g_i) val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U states(id).g := Mux(at_dest, g_v, g_r) states(id).vc_sel.foreach(_.foreach(_ := false.B)) for (o <- 0 until nEgress) { when (o.U === io.in.flit(i).bits.flow.egress_node_id) { states(id).vc_sel(o+nOutputs)(0) := true.B } } states(id).flow := io.in.flit(i).bits.flow if (anyFifo) { val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) => s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id }).asUInt } } } (route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) => if (virtualChannelParams(idx).traversable) { i.valid := s.g === g_r i.bits.flow := s.flow i.bits.src_virt_id := idx.U when (i.fire) { s.g := g_v } } else { i.valid := false.B i.bits := DontCare } } when (io.router_req.fire) { val id = io.router_req.bits.src_virt_id assert(states(id).g === g_r) states(id).g := g_v for (i <- 0 until nVirtualChannels) { when (i.U === id) { states(i).vc_sel := io.router_resp.vc_sel } } } val mask = RegInit(0.U(nVirtualChannels.W)) val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams))) val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool())) val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask)) val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels) // Prioritize incoming packetes when (io.router_req.fire) { mask := (1.U << io.router_req.bits.src_virt_id) - 1.U } .elsewhen (vcalloc_vals.orR) { mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) }) } io.vcalloc_req.valid := vcalloc_vals.orR io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs) states.zipWithIndex.map { case (s,idx) => if (virtualChannelParams(idx).traversable) { vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U vcalloc_reqs(idx).in_vc := idx.U vcalloc_reqs(idx).vc_sel := s.vc_sel vcalloc_reqs(idx).flow := s.flow when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a } if (combineRCVA) { when (route_arbiter.io.in(idx).fire) { vcalloc_vals(idx) := true.B vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel } } } else { vcalloc_vals(idx) := false.B vcalloc_reqs(idx) := DontCare } } io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready when (io.vcalloc_req.fire) { for (i <- 0 until nVirtualChannels) { when (vcalloc_sel(i)) { states(i).vc_sel := io.vcalloc_resp.vc_sel states(i).g := g_a if (!combineRCVA) { assert(states(i).g === g_v) } } } } val 
salloc_arb = Module(new SwitchArbiter( nVirtualChannels, cParam.destSpeedup, outParams, egressParams )) (states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) => if (virtualChannelParams(i).traversable) { val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid r.bits.vc_sel := s.vc_sel val deq_tail = input_buffer.io.deq(i).bits.tail r.bits.tail := deq_tail when (r.fire && deq_tail) { s.g := g_i } input_buffer.io.deq(i).ready := r.ready } else { r.valid := false.B r.bits := DontCare } } io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready)) io.salloc_req <> salloc_arb.io.out when (io.block) { salloc_arb.io.out.foreach(_.ready := false.B) io.salloc_req.foreach(_.valid := false.B) } class OutBundle extends Bundle { val valid = Bool() val vid = UInt(virtualChannelBits.W) val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W) val flit = new Flit(cParam.payloadBits) } val salloc_outs = if (combineSAST) { Wire(Vec(cParam.destSpeedup, new OutBundle)) } else { Reg(Vec(cParam.destSpeedup, new OutBundle)) } io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) => Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U) }.reduce(_|_) io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) => Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)), salloc_arb.io.chosen_oh(i), 0.U) }.reduce(_|_) for (i <- 0 until cParam.destSpeedup) { val salloc_out = salloc_outs(i) salloc_out.valid := salloc_arb.io.out(i).fire salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i)) val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel)) val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq) when (salloc_arb.io.out(i).fire) { salloc_out.out_vid := virt_channel salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload)) salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head)) salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)) salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow)) } .otherwise { salloc_out.out_vid := DontCare salloc_out.flit := DontCare } salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch io.out(i).valid := salloc_out.valid io.out(i).bits.flit := salloc_out.flit io.out(i).bits.out_virt_channel := salloc_out.out_vid } def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = { if (virtualChannelParams(srcV).traversable) { outParams.zipWithIndex.map { case (oP, oI) => (0 until oP.nVirtualChannels).map { oV => var allow = false virtualChannelParams(srcV).possibleFlows.foreach { pI => allow = allow || routingRelation( cParam.channelRoutingInfos(srcV), oP.channelRoutingInfos(oV), pI ) } if (!allow) sel(oI)(oV) := false.B } } } } (0 until nVirtualChannels).map { i => if (!virtualChannelParams(i).traversable) states(i) := DontCare filterVCSel(states(i).vc_sel, i) } when (reset.asBool) { states.foreach(_.g := g_i) } }
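InputUnit.scala above computes io.in.credit_return and io.in.vc_free from the switch-allocation results, which is the receiver half of credit-based virtual-channel flow control; the sending router keeps one credit counter per virtual channel and only requests the switch while out_credit_available indicates buffer space downstream. A hedged sketch of that sender-side counter follows; CreditCounter and its ports are illustrative names, not Constellation's actual output-unit interface.

import chisel3._
import chisel3.util._

// Hedged sketch of per-VC credit counting: decrement a VC's credit count when a
// flit is sent on it, increment it when the receiver's credit_return bit comes
// back, and expose a per-VC availability vector like io.out_credit_available.
class CreditCounter(nVC: Int = 8, depth: Int = 4) extends Module {
  val io = IO(new Bundle {
    val sendFire     = Input(Bool())
    val sendVC       = Input(UInt(log2Ceil(nVC).W))
    val creditReturn = Input(UInt(nVC.W))
    val creditAvail  = Output(Vec(nVC, Bool()))
  })
  val credits = RegInit(VecInit(Seq.fill(nVC)(depth.U(log2Ceil(depth + 1).W))))
  for (i <- 0 until nVC) {
    val dec = io.sendFire && io.sendVC === i.U
    val inc = io.creditReturn(i)
    when (dec =/= inc) { credits(i) := Mux(dec, credits(i) - 1.U, credits(i) + 1.U) }
    io.creditAvail(i) := credits(i) =/= 0.U
  }
}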
module InputUnit_30( // @[InputUnit.scala:158:7] input clock, // @[InputUnit.scala:158:7] input reset, // @[InputUnit.scala:158:7] output [2:0] io_router_req_bits_src_virt_id, // @[InputUnit.scala:170:14] output [2:0] io_router_req_bits_flow_vnet_id, // @[InputUnit.scala:170:14] output [4:0] io_router_req_bits_flow_ingress_node, // @[InputUnit.scala:170:14] output [1:0] io_router_req_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14] output [4:0] io_router_req_bits_flow_egress_node, // @[InputUnit.scala:170:14] output [1:0] io_router_req_bits_flow_egress_node_id, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_3_1, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_3_2, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_3_3, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_3_4, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_3_5, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_3_6, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_3_7, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_1, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_2, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_3, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_4, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_5, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_6, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_7, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_0_1, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_0_2, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_0_3, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_0_4, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_0_5, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_0_6, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_0_7, // @[InputUnit.scala:170:14] input io_vcalloc_req_ready, // @[InputUnit.scala:170:14] output io_vcalloc_req_valid, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_1, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_2, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_3, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_4, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_5, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_6, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_7, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_1, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_2, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_3, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_4, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_5, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_6, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_7, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_0_1, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_0_2, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_0_3, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_0_4, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_0_5, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_0_6, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_0_7, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_3_1, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_3_2, // 
@[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_3_3, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_3_4, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_3_5, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_3_6, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_3_7, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_1_1, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_1_2, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_1_3, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_1_4, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_1_5, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_1_6, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_1_7, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_0_1, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_0_2, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_0_3, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_0_4, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_0_5, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_0_6, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_0_7, // @[InputUnit.scala:170:14] input io_out_credit_available_3_1, // @[InputUnit.scala:170:14] input io_out_credit_available_3_2, // @[InputUnit.scala:170:14] input io_out_credit_available_3_3, // @[InputUnit.scala:170:14] input io_out_credit_available_3_4, // @[InputUnit.scala:170:14] input io_out_credit_available_3_5, // @[InputUnit.scala:170:14] input io_out_credit_available_3_6, // @[InputUnit.scala:170:14] input io_out_credit_available_3_7, // @[InputUnit.scala:170:14] input io_out_credit_available_2_0, // @[InputUnit.scala:170:14] input io_out_credit_available_2_1, // @[InputUnit.scala:170:14] input io_out_credit_available_2_2, // @[InputUnit.scala:170:14] input io_out_credit_available_2_3, // @[InputUnit.scala:170:14] input io_out_credit_available_2_4, // @[InputUnit.scala:170:14] input io_out_credit_available_2_5, // @[InputUnit.scala:170:14] input io_out_credit_available_2_6, // @[InputUnit.scala:170:14] input io_out_credit_available_2_7, // @[InputUnit.scala:170:14] input io_out_credit_available_1_1, // @[InputUnit.scala:170:14] input io_out_credit_available_1_2, // @[InputUnit.scala:170:14] input io_out_credit_available_1_3, // @[InputUnit.scala:170:14] input io_out_credit_available_1_4, // @[InputUnit.scala:170:14] input io_out_credit_available_1_5, // @[InputUnit.scala:170:14] input io_out_credit_available_1_6, // @[InputUnit.scala:170:14] input io_out_credit_available_1_7, // @[InputUnit.scala:170:14] input io_out_credit_available_0_1, // @[InputUnit.scala:170:14] input io_out_credit_available_0_2, // @[InputUnit.scala:170:14] input io_out_credit_available_0_3, // @[InputUnit.scala:170:14] input io_out_credit_available_0_4, // @[InputUnit.scala:170:14] input io_out_credit_available_0_5, // @[InputUnit.scala:170:14] input io_out_credit_available_0_6, // @[InputUnit.scala:170:14] input io_out_credit_available_0_7, // @[InputUnit.scala:170:14] input io_salloc_req_0_ready, // @[InputUnit.scala:170:14] output io_salloc_req_0_valid, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_1, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_3, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_4, // @[InputUnit.scala:170:14] output 
io_salloc_req_0_bits_vc_sel_3_5, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_6, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_7, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_1, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_3, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_4, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_5, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_6, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_7, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_1, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_3, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_4, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_5, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_6, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_7, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_1, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_3, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_4, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_5, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_6, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_7, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14] output io_out_0_valid, // @[InputUnit.scala:170:14] output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14] output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14] output [72:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14] output [2:0] io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14] output [4:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14] output [1:0] io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14] output [4:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14] output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14] output [2:0] io_out_0_bits_out_virt_channel, // @[InputUnit.scala:170:14] output [2:0] io_debug_va_stall, // @[InputUnit.scala:170:14] output [2:0] io_debug_sa_stall, // @[InputUnit.scala:170:14] input io_in_flit_0_valid, // @[InputUnit.scala:170:14] input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14] input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14] input [72:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14] input [2:0] io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14] input [4:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14] input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14] input [4:0] io_in_flit_0_bits_flow_egress_node, // @[InputUnit.scala:170:14] input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14] input [2:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14] output [7:0] io_in_credit_return, // @[InputUnit.scala:170:14] output [7:0] io_in_vc_free // @[InputUnit.scala:170:14] ); wire vcalloc_vals_7; // 
@[InputUnit.scala:266:32] wire vcalloc_vals_6; // @[InputUnit.scala:266:32] wire vcalloc_vals_5; // @[InputUnit.scala:266:32] wire vcalloc_vals_4; // @[InputUnit.scala:266:32] wire vcalloc_vals_3; // @[InputUnit.scala:266:32] wire vcalloc_vals_2; // @[InputUnit.scala:266:32] wire vcalloc_vals_1; // @[InputUnit.scala:266:32] wire _salloc_arb_io_in_1_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_2_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_3_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_4_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_5_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_6_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_7_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26] wire [7:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26] wire _route_arbiter_io_in_1_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_2_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_3_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_4_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_5_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_6_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_7_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29] wire [2:0] _route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29] wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_3_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_3_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_3_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_3_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_4_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_4_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_4_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_4_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_5_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_5_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_5_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_5_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_6_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_6_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_6_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_6_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_7_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_7_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_7_bits_tail; // 
@[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_7_bits_payload; // @[InputUnit.scala:181:28] reg [2:0] states_1_g; // @[InputUnit.scala:192:19] reg states_1_vc_sel_3_1; // @[InputUnit.scala:192:19] reg states_1_vc_sel_1_1; // @[InputUnit.scala:192:19] reg states_1_vc_sel_0_1; // @[InputUnit.scala:192:19] reg states_1_vc_sel_0_2; // @[InputUnit.scala:192:19] reg states_1_vc_sel_0_3; // @[InputUnit.scala:192:19] reg states_1_vc_sel_0_4; // @[InputUnit.scala:192:19] reg states_1_vc_sel_0_5; // @[InputUnit.scala:192:19] reg states_1_vc_sel_0_6; // @[InputUnit.scala:192:19] reg states_1_vc_sel_0_7; // @[InputUnit.scala:192:19] reg [2:0] states_1_flow_vnet_id; // @[InputUnit.scala:192:19] reg [4:0] states_1_flow_ingress_node; // @[InputUnit.scala:192:19] reg [1:0] states_1_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [4:0] states_1_flow_egress_node; // @[InputUnit.scala:192:19] reg [1:0] states_1_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_2_g; // @[InputUnit.scala:192:19] reg states_2_vc_sel_3_1; // @[InputUnit.scala:192:19] reg states_2_vc_sel_3_2; // @[InputUnit.scala:192:19] reg states_2_vc_sel_3_3; // @[InputUnit.scala:192:19] reg states_2_vc_sel_3_4; // @[InputUnit.scala:192:19] reg states_2_vc_sel_3_5; // @[InputUnit.scala:192:19] reg states_2_vc_sel_3_6; // @[InputUnit.scala:192:19] reg states_2_vc_sel_3_7; // @[InputUnit.scala:192:19] reg states_2_vc_sel_1_1; // @[InputUnit.scala:192:19] reg states_2_vc_sel_1_2; // @[InputUnit.scala:192:19] reg states_2_vc_sel_1_3; // @[InputUnit.scala:192:19] reg states_2_vc_sel_1_4; // @[InputUnit.scala:192:19] reg states_2_vc_sel_1_5; // @[InputUnit.scala:192:19] reg states_2_vc_sel_1_6; // @[InputUnit.scala:192:19] reg states_2_vc_sel_1_7; // @[InputUnit.scala:192:19] reg states_2_vc_sel_0_1; // @[InputUnit.scala:192:19] reg states_2_vc_sel_0_2; // @[InputUnit.scala:192:19] reg states_2_vc_sel_0_3; // @[InputUnit.scala:192:19] reg states_2_vc_sel_0_4; // @[InputUnit.scala:192:19] reg states_2_vc_sel_0_5; // @[InputUnit.scala:192:19] reg states_2_vc_sel_0_6; // @[InputUnit.scala:192:19] reg states_2_vc_sel_0_7; // @[InputUnit.scala:192:19] reg [2:0] states_2_flow_vnet_id; // @[InputUnit.scala:192:19] reg [4:0] states_2_flow_ingress_node; // @[InputUnit.scala:192:19] reg [1:0] states_2_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [4:0] states_2_flow_egress_node; // @[InputUnit.scala:192:19] reg [1:0] states_2_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_3_g; // @[InputUnit.scala:192:19] reg states_3_vc_sel_3_1; // @[InputUnit.scala:192:19] reg states_3_vc_sel_3_2; // @[InputUnit.scala:192:19] reg states_3_vc_sel_3_3; // @[InputUnit.scala:192:19] reg states_3_vc_sel_3_4; // @[InputUnit.scala:192:19] reg states_3_vc_sel_3_5; // @[InputUnit.scala:192:19] reg states_3_vc_sel_3_6; // @[InputUnit.scala:192:19] reg states_3_vc_sel_3_7; // @[InputUnit.scala:192:19] reg states_3_vc_sel_1_1; // @[InputUnit.scala:192:19] reg states_3_vc_sel_1_2; // @[InputUnit.scala:192:19] reg states_3_vc_sel_1_3; // @[InputUnit.scala:192:19] reg states_3_vc_sel_1_4; // @[InputUnit.scala:192:19] reg states_3_vc_sel_1_5; // @[InputUnit.scala:192:19] reg states_3_vc_sel_1_6; // @[InputUnit.scala:192:19] reg states_3_vc_sel_1_7; // @[InputUnit.scala:192:19] reg states_3_vc_sel_0_1; // @[InputUnit.scala:192:19] reg states_3_vc_sel_0_2; // @[InputUnit.scala:192:19] reg states_3_vc_sel_0_3; // @[InputUnit.scala:192:19] reg states_3_vc_sel_0_4; // @[InputUnit.scala:192:19] reg states_3_vc_sel_0_5; // 
@[InputUnit.scala:192:19] reg states_3_vc_sel_0_6; // @[InputUnit.scala:192:19] reg states_3_vc_sel_0_7; // @[InputUnit.scala:192:19] reg [2:0] states_3_flow_vnet_id; // @[InputUnit.scala:192:19] reg [4:0] states_3_flow_ingress_node; // @[InputUnit.scala:192:19] reg [1:0] states_3_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [4:0] states_3_flow_egress_node; // @[InputUnit.scala:192:19] reg [1:0] states_3_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_4_g; // @[InputUnit.scala:192:19] reg states_4_vc_sel_3_1; // @[InputUnit.scala:192:19] reg states_4_vc_sel_3_2; // @[InputUnit.scala:192:19] reg states_4_vc_sel_3_3; // @[InputUnit.scala:192:19] reg states_4_vc_sel_3_4; // @[InputUnit.scala:192:19] reg states_4_vc_sel_3_5; // @[InputUnit.scala:192:19] reg states_4_vc_sel_3_6; // @[InputUnit.scala:192:19] reg states_4_vc_sel_3_7; // @[InputUnit.scala:192:19] reg states_4_vc_sel_1_1; // @[InputUnit.scala:192:19] reg states_4_vc_sel_1_2; // @[InputUnit.scala:192:19] reg states_4_vc_sel_1_3; // @[InputUnit.scala:192:19] reg states_4_vc_sel_1_4; // @[InputUnit.scala:192:19] reg states_4_vc_sel_1_5; // @[InputUnit.scala:192:19] reg states_4_vc_sel_1_6; // @[InputUnit.scala:192:19] reg states_4_vc_sel_1_7; // @[InputUnit.scala:192:19] reg states_4_vc_sel_0_1; // @[InputUnit.scala:192:19] reg states_4_vc_sel_0_2; // @[InputUnit.scala:192:19] reg states_4_vc_sel_0_3; // @[InputUnit.scala:192:19] reg states_4_vc_sel_0_4; // @[InputUnit.scala:192:19] reg states_4_vc_sel_0_5; // @[InputUnit.scala:192:19] reg states_4_vc_sel_0_6; // @[InputUnit.scala:192:19] reg states_4_vc_sel_0_7; // @[InputUnit.scala:192:19] reg [2:0] states_4_flow_vnet_id; // @[InputUnit.scala:192:19] reg [4:0] states_4_flow_ingress_node; // @[InputUnit.scala:192:19] reg [1:0] states_4_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [4:0] states_4_flow_egress_node; // @[InputUnit.scala:192:19] reg [1:0] states_4_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_5_g; // @[InputUnit.scala:192:19] reg states_5_vc_sel_3_1; // @[InputUnit.scala:192:19] reg states_5_vc_sel_3_2; // @[InputUnit.scala:192:19] reg states_5_vc_sel_3_3; // @[InputUnit.scala:192:19] reg states_5_vc_sel_3_4; // @[InputUnit.scala:192:19] reg states_5_vc_sel_3_5; // @[InputUnit.scala:192:19] reg states_5_vc_sel_3_6; // @[InputUnit.scala:192:19] reg states_5_vc_sel_3_7; // @[InputUnit.scala:192:19] reg states_5_vc_sel_1_1; // @[InputUnit.scala:192:19] reg states_5_vc_sel_1_2; // @[InputUnit.scala:192:19] reg states_5_vc_sel_1_3; // @[InputUnit.scala:192:19] reg states_5_vc_sel_1_4; // @[InputUnit.scala:192:19] reg states_5_vc_sel_1_5; // @[InputUnit.scala:192:19] reg states_5_vc_sel_1_6; // @[InputUnit.scala:192:19] reg states_5_vc_sel_1_7; // @[InputUnit.scala:192:19] reg states_5_vc_sel_0_1; // @[InputUnit.scala:192:19] reg states_5_vc_sel_0_2; // @[InputUnit.scala:192:19] reg states_5_vc_sel_0_3; // @[InputUnit.scala:192:19] reg states_5_vc_sel_0_4; // @[InputUnit.scala:192:19] reg states_5_vc_sel_0_5; // @[InputUnit.scala:192:19] reg states_5_vc_sel_0_6; // @[InputUnit.scala:192:19] reg states_5_vc_sel_0_7; // @[InputUnit.scala:192:19] reg [2:0] states_5_flow_vnet_id; // @[InputUnit.scala:192:19] reg [4:0] states_5_flow_ingress_node; // @[InputUnit.scala:192:19] reg [1:0] states_5_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [4:0] states_5_flow_egress_node; // @[InputUnit.scala:192:19] reg [1:0] states_5_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_6_g; // 
@[InputUnit.scala:192:19] reg states_6_vc_sel_3_1; // @[InputUnit.scala:192:19] reg states_6_vc_sel_3_2; // @[InputUnit.scala:192:19] reg states_6_vc_sel_3_3; // @[InputUnit.scala:192:19] reg states_6_vc_sel_3_4; // @[InputUnit.scala:192:19] reg states_6_vc_sel_3_5; // @[InputUnit.scala:192:19] reg states_6_vc_sel_3_6; // @[InputUnit.scala:192:19] reg states_6_vc_sel_3_7; // @[InputUnit.scala:192:19] reg states_6_vc_sel_1_1; // @[InputUnit.scala:192:19] reg states_6_vc_sel_1_2; // @[InputUnit.scala:192:19] reg states_6_vc_sel_1_3; // @[InputUnit.scala:192:19] reg states_6_vc_sel_1_4; // @[InputUnit.scala:192:19] reg states_6_vc_sel_1_5; // @[InputUnit.scala:192:19] reg states_6_vc_sel_1_6; // @[InputUnit.scala:192:19] reg states_6_vc_sel_1_7; // @[InputUnit.scala:192:19] reg states_6_vc_sel_0_1; // @[InputUnit.scala:192:19] reg states_6_vc_sel_0_2; // @[InputUnit.scala:192:19] reg states_6_vc_sel_0_3; // @[InputUnit.scala:192:19] reg states_6_vc_sel_0_4; // @[InputUnit.scala:192:19] reg states_6_vc_sel_0_5; // @[InputUnit.scala:192:19] reg states_6_vc_sel_0_6; // @[InputUnit.scala:192:19] reg states_6_vc_sel_0_7; // @[InputUnit.scala:192:19] reg [2:0] states_6_flow_vnet_id; // @[InputUnit.scala:192:19] reg [4:0] states_6_flow_ingress_node; // @[InputUnit.scala:192:19] reg [1:0] states_6_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [4:0] states_6_flow_egress_node; // @[InputUnit.scala:192:19] reg [1:0] states_6_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_7_g; // @[InputUnit.scala:192:19] reg states_7_vc_sel_3_1; // @[InputUnit.scala:192:19] reg states_7_vc_sel_3_2; // @[InputUnit.scala:192:19] reg states_7_vc_sel_3_3; // @[InputUnit.scala:192:19] reg states_7_vc_sel_3_4; // @[InputUnit.scala:192:19] reg states_7_vc_sel_3_5; // @[InputUnit.scala:192:19] reg states_7_vc_sel_3_6; // @[InputUnit.scala:192:19] reg states_7_vc_sel_3_7; // @[InputUnit.scala:192:19] reg states_7_vc_sel_1_1; // @[InputUnit.scala:192:19] reg states_7_vc_sel_1_2; // @[InputUnit.scala:192:19] reg states_7_vc_sel_1_3; // @[InputUnit.scala:192:19] reg states_7_vc_sel_1_4; // @[InputUnit.scala:192:19] reg states_7_vc_sel_1_5; // @[InputUnit.scala:192:19] reg states_7_vc_sel_1_6; // @[InputUnit.scala:192:19] reg states_7_vc_sel_1_7; // @[InputUnit.scala:192:19] reg states_7_vc_sel_0_1; // @[InputUnit.scala:192:19] reg states_7_vc_sel_0_2; // @[InputUnit.scala:192:19] reg states_7_vc_sel_0_3; // @[InputUnit.scala:192:19] reg states_7_vc_sel_0_4; // @[InputUnit.scala:192:19] reg states_7_vc_sel_0_5; // @[InputUnit.scala:192:19] reg states_7_vc_sel_0_6; // @[InputUnit.scala:192:19] reg states_7_vc_sel_0_7; // @[InputUnit.scala:192:19] reg [2:0] states_7_flow_vnet_id; // @[InputUnit.scala:192:19] reg [4:0] states_7_flow_ingress_node; // @[InputUnit.scala:192:19] reg [1:0] states_7_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [4:0] states_7_flow_egress_node; // @[InputUnit.scala:192:19] reg [1:0] states_7_flow_egress_node_id; // @[InputUnit.scala:192:19] wire _GEN = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30] wire route_arbiter_io_in_1_valid = states_1_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_2_valid = states_2_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_3_valid = states_3_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_4_valid = states_4_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_5_valid = states_5_g == 3'h1; // @[InputUnit.scala:192:19, 
:229:22] wire route_arbiter_io_in_6_valid = states_6_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_7_valid = states_7_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] reg [7:0] mask; // @[InputUnit.scala:250:21] wire [7:0] _vcalloc_filter_T_3 = {vcalloc_vals_7, vcalloc_vals_6, vcalloc_vals_5, vcalloc_vals_4, vcalloc_vals_3, vcalloc_vals_2, vcalloc_vals_1, 1'h0} & ~mask; // @[InputUnit.scala:250:21, :253:{80,87,89}, :266:32] wire [15:0] vcalloc_filter = _vcalloc_filter_T_3[0] ? 16'h1 : _vcalloc_filter_T_3[1] ? 16'h2 : _vcalloc_filter_T_3[2] ? 16'h4 : _vcalloc_filter_T_3[3] ? 16'h8 : _vcalloc_filter_T_3[4] ? 16'h10 : _vcalloc_filter_T_3[5] ? 16'h20 : _vcalloc_filter_T_3[6] ? 16'h40 : _vcalloc_filter_T_3[7] ? 16'h80 : vcalloc_vals_1 ? 16'h200 : vcalloc_vals_2 ? 16'h400 : vcalloc_vals_3 ? 16'h800 : vcalloc_vals_4 ? 16'h1000 : vcalloc_vals_5 ? 16'h2000 : vcalloc_vals_6 ? 16'h4000 : {vcalloc_vals_7, 15'h0}; // @[OneHot.scala:85:71] wire [7:0] vcalloc_sel = vcalloc_filter[7:0] | vcalloc_filter[15:8]; // @[Mux.scala:50:70] wire io_vcalloc_req_valid_0 = vcalloc_vals_1 | vcalloc_vals_2 | vcalloc_vals_3 | vcalloc_vals_4 | vcalloc_vals_5 | vcalloc_vals_6 | vcalloc_vals_7; // @[package.scala:81:59] assign vcalloc_vals_1 = states_1_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_2 = states_2_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_3 = states_3_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_4 = states_4_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_5 = states_5_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_6 = states_6_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_7 = states_7_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] wire _GEN_0 = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35] wire _GEN_1 = _GEN_0 & vcalloc_sel[1]; // @[Mux.scala:32:36] wire _GEN_2 = _GEN_0 & vcalloc_sel[2]; // @[Mux.scala:32:36] wire _GEN_3 = _GEN_0 & vcalloc_sel[3]; // @[Mux.scala:32:36] wire _GEN_4 = _GEN_0 & vcalloc_sel[4]; // @[Mux.scala:32:36] wire _GEN_5 = _GEN_0 & vcalloc_sel[5]; // @[Mux.scala:32:36] wire _GEN_6 = _GEN_0 & vcalloc_sel[6]; // @[Mux.scala:32:36] wire _GEN_7 = _GEN_0 & vcalloc_sel[7]; // @[Mux.scala:32:36]
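The arbitration logic above is a rotating-priority (round-robin) pick behind vcalloc_sel: requestors are first filtered by ~mask so channels above the last grant are preferred, the unfiltered requestors sit in the upper half of vcalloc_filter as a fallback, and the two halves are OR-ed back into an 8-bit one-hot. The following is a minimal Chisel sketch of that selection idiom only; RoundRobinSelect and its port names are illustrative and do not come from the generator source.

import chisel3._
import chisel3.util._

// Round-robin one-hot selection over n requestors, mirroring the vcalloc pick above.
class RoundRobinSelect(n: Int) extends Module {
  val io = IO(new Bundle {
    val reqs = Input(UInt(n.W))  // request bits (the vcalloc_vals_* wires)
    val fire = Input(Bool())     // asserted when the chosen requestor is accepted
    val sel  = Output(UInt(n.W)) // one-hot grant (the vcalloc_sel wire)
  })
  val mask   = RegInit(0.U(n.W))                    // bits at or below the last grant
  val filter = Cat(io.reqs, io.reqs & ~mask)        // low half: masked requests, tried first
  val oneHot = PriorityEncoderOH(filter)            // 2n-bit one-hot, lowest set bit wins
  io.sel := oneHot(n - 1, 0) | oneHot(2 * n - 1, n) // fold the two halves back together
  when (io.fire) {
    mask := io.sel | (io.sel - 1.U)                 // grant and everything below it lose priority
  }
}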
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerShiftReg_w1_d3_i0_77( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_121 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
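The module above is the w = 1, sync depth = 3, init = 0 elaboration of AsyncResetSynchronizerShiftReg: a thin wrapper around one asynchronously reset three-flop primitive chain. A minimal usage sketch of the companion object from SynchronizerReg.scala follows; the IrqSync module and the irq_* signal names are made up for illustration.

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class IrqSync extends Module {
  val io = IO(new Bundle {
    val irq_async = Input(Bool())  // level generated in another clock domain
    val irq_sync  = Output(Bool()) // safe to sample in this module's clock domain
  })
  // Three flops deep, reset value 0 -- the same configuration that elaborates to
  // the AsyncResetSynchronizerShiftReg_w1_d3_i0 wrapper shown above.
  io.irq_sync := AsyncResetSynchronizerShiftReg(io.irq_async, sync = 3, init = 0, name = Some("irq_sync"))
}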
Generate the Verilog code corresponding to the following Chisel files. File Buffer.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.BufferParams class TLBufferNode ( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit valName: ValName) extends TLAdapterNode( clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) }, managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) } ) { override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}" override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none) } class TLBuffer( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters) extends LazyModule { def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace) def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde) def this()(implicit p: Parameters) = this(BufferParams.default) val node = new TLBufferNode(a, b, c, d, e) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def headBundle = node.out.head._2.bundle override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.a <> a(in .a) in .d <> d(out.d) if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) { in .b <> b(out.b) out.c <> c(in .c) out.e <> e(in .e) } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLBuffer { def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default) def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde) def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace) def apply( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters): TLNode = { val buffer = LazyModule(new TLBuffer(a, b, c, d, e)) buffer.node } def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = { val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) } name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } } buffers.map(_.node) } def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = { chain(depth, name) .reduceLeftOption(_ :*=* _) .getOrElse(TLNameNode("no_buffer")) } }
module TLBuffer_18( // @[Buffer.scala:40:9] input clock, // @[Buffer.scala:40:9] input reset // @[Buffer.scala:40:9] ); endmodule
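TLBuffer_18 elaborates to an empty shell (clock and reset only) in this particular instance; in general the factory methods in Buffer.scala splice a registered buffer stage into a diplomatic TileLink path. The sketch below shows one hedged way to use them, placing a single default buffer stage in front of a RAM; TLRAM and AddressSet are standard rocket-chip components assumed here for illustration, not part of Buffer.scala.

import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, BufferParams}
import freechips.rocketchip.tilelink._

// Hypothetical wrapper: a 4 KiB RAM reachable only through one buffer stage.
class BufferedRAM(base: BigInt)(implicit p: Parameters) extends LazyModule {
  val ram  = LazyModule(new TLRAM(AddressSet(base, 0xfff)))
  val node: TLNode = TLBuffer(BufferParams.default) // factory from Buffer.scala above
  ram.node := node                                  // traffic entering `node` is buffered, then reaches the RAM
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) { }
}

TLBuffer.chainNode(2, Some("out_buffers")) from the same file would splice two such stages instead of one.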
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag }
module OptimizationBarrier_TLBEntryData_26( // @[package.scala:267:30] input clock, // @[package.scala:267:30] input reset, // @[package.scala:267:30] input [19:0] io_x_ppn, // @[package.scala:268:18] input io_x_u, // @[package.scala:268:18] input io_x_g, // @[package.scala:268:18] input io_x_ae_ptw, // @[package.scala:268:18] input io_x_ae_final, // @[package.scala:268:18] input io_x_ae_stage2, // @[package.scala:268:18] input io_x_pf, // @[package.scala:268:18] input io_x_gf, // @[package.scala:268:18] input io_x_sw, // @[package.scala:268:18] input io_x_sx, // @[package.scala:268:18] input io_x_sr, // @[package.scala:268:18] input io_x_hw, // @[package.scala:268:18] input io_x_hx, // @[package.scala:268:18] input io_x_hr, // @[package.scala:268:18] input io_x_pw, // @[package.scala:268:18] input io_x_px, // @[package.scala:268:18] input io_x_pr, // @[package.scala:268:18] input io_x_ppp, // @[package.scala:268:18] input io_x_pal, // @[package.scala:268:18] input io_x_paa, // @[package.scala:268:18] input io_x_eff, // @[package.scala:268:18] input io_x_c, // @[package.scala:268:18] input io_x_fragmented_superpage, // @[package.scala:268:18] output io_y_u, // @[package.scala:268:18] output io_y_ae_ptw, // @[package.scala:268:18] output io_y_ae_final, // @[package.scala:268:18] output io_y_ae_stage2, // @[package.scala:268:18] output io_y_pf, // @[package.scala:268:18] output io_y_gf, // @[package.scala:268:18] output io_y_sw, // @[package.scala:268:18] output io_y_sx, // @[package.scala:268:18] output io_y_sr, // @[package.scala:268:18] output io_y_hw, // @[package.scala:268:18] output io_y_hx, // @[package.scala:268:18] output io_y_hr, // @[package.scala:268:18] output io_y_pw, // @[package.scala:268:18] output io_y_px, // @[package.scala:268:18] output io_y_pr, // @[package.scala:268:18] output io_y_ppp, // @[package.scala:268:18] output io_y_pal, // @[package.scala:268:18] output io_y_paa, // @[package.scala:268:18] output io_y_eff, // @[package.scala:268:18] output io_y_c // @[package.scala:268:18] ); wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30] wire io_x_u_0 = io_x_u; // @[package.scala:267:30] wire io_x_g_0 = io_x_g; // @[package.scala:267:30] wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30] wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30] wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30] wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30] wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30] wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30] wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30] wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30] wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30] wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30] wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30] wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30] wire io_x_px_0 = io_x_px; // @[package.scala:267:30] wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30] wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30] wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30] wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30] wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30] wire io_x_c_0 = io_x_c; // @[package.scala:267:30] wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30] wire [19:0] io_y_ppn = io_x_ppn_0; // @[package.scala:267:30] wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30] wire io_y_g = io_x_g_0; // @[package.scala:267:30] wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // 
@[package.scala:267:30] wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30] wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30] wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30] wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30] wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30] wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30] wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30] wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30] wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30] wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30] wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30] wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30] wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30] wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30] wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30] wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30] wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30] wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30] wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30] assign io_y_u = io_y_u_0; // @[package.scala:267:30] assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30] assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30] assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30] assign io_y_pf = io_y_pf_0; // @[package.scala:267:30] assign io_y_gf = io_y_gf_0; // @[package.scala:267:30] assign io_y_sw = io_y_sw_0; // @[package.scala:267:30] assign io_y_sx = io_y_sx_0; // @[package.scala:267:30] assign io_y_sr = io_y_sr_0; // @[package.scala:267:30] assign io_y_hw = io_y_hw_0; // @[package.scala:267:30] assign io_y_hx = io_y_hx_0; // @[package.scala:267:30] assign io_y_hr = io_y_hr_0; // @[package.scala:267:30] assign io_y_pw = io_y_pw_0; // @[package.scala:267:30] assign io_y_px = io_y_px_0; // @[package.scala:267:30] assign io_y_pr = io_y_pr_0; // @[package.scala:267:30] assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30] assign io_y_pal = io_y_pal_0; // @[package.scala:267:30] assign io_y_paa = io_y_paa_0; // @[package.scala:267:30] assign io_y_eff = io_y_eff_0; // @[package.scala:267:30] assign io_y_c = io_y_c_0; // @[package.scala:267:30] endmodule
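OptimizationBarrier_TLBEntryData_26 above is the elaboration of OptimizationBarrier from package.scala for a TLB entry bundle: behaviourally it is pure feed-through, but it pushes the wrapped value into its own module boundary so synthesis keeps the intermediate signal distinct. A minimal usage sketch follows; the Scramble module and its ports are illustrative only.

import chisel3._
import freechips.rocketchip.util.OptimizationBarrier

class Scramble extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val out = Output(UInt(8.W))
  })
  // Identity in behaviour, but elaborated as a separate OptimizationBarrier_* module
  // (named after the wrapped type), keeping `guarded` visible as its own hierarchy level.
  val guarded = OptimizationBarrier(io.in)
  io.out := guarded + 1.U
}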
Generate the Verilog code corresponding to the following Chisel files. File btb.scala: package boom.v3.ifu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import boom.v3.common._ import boom.v3.util.{BoomCoreStringPrefix} import scala.math.min case class BoomBTBParams( nSets: Int = 128, nWays: Int = 2, offsetSz: Int = 13, extendedNSets: Int = 128 ) class BTBBranchPredictorBank(params: BoomBTBParams = BoomBTBParams())(implicit p: Parameters) extends BranchPredictorBank()(p) { override val nSets = params.nSets override val nWays = params.nWays val tagSz = vaddrBitsExtended - log2Ceil(nSets) - log2Ceil(fetchWidth) - 1 val offsetSz = params.offsetSz val extendedNSets = params.extendedNSets require(isPow2(nSets)) require(isPow2(extendedNSets) || extendedNSets == 0) require(extendedNSets <= nSets) require(extendedNSets >= 1) class BTBEntry extends Bundle { val offset = SInt(offsetSz.W) val extended = Bool() } val btbEntrySz = offsetSz + 1 class BTBMeta extends Bundle { val is_br = Bool() val tag = UInt(tagSz.W) } val btbMetaSz = tagSz + 1 class BTBPredictMeta extends Bundle { val write_way = UInt(log2Ceil(nWays).W) } val s1_meta = Wire(new BTBPredictMeta) val f3_meta = RegNext(RegNext(s1_meta)) io.f3_meta := f3_meta.asUInt override val metaSz = s1_meta.asUInt.getWidth val doing_reset = RegInit(true.B) val reset_idx = RegInit(0.U(log2Ceil(nSets).W)) reset_idx := reset_idx + doing_reset when (reset_idx === (nSets-1).U) { doing_reset := false.B } val meta = Seq.fill(nWays) { SyncReadMem(nSets, Vec(bankWidth, UInt(btbMetaSz.W))) } val btb = Seq.fill(nWays) { SyncReadMem(nSets, Vec(bankWidth, UInt(btbEntrySz.W))) } val ebtb = SyncReadMem(extendedNSets, UInt(vaddrBitsExtended.W)) val mems = (((0 until nWays) map ({w:Int => Seq( (f"btb_meta_way$w", nSets, bankWidth * btbMetaSz), (f"btb_data_way$w", nSets, bankWidth * btbEntrySz))})).flatten ++ Seq(("ebtb", extendedNSets, vaddrBitsExtended))) val s1_req_rbtb = VecInit(btb.map { b => VecInit(b.read(s0_idx , s0_valid).map(_.asTypeOf(new BTBEntry))) }) val s1_req_rmeta = VecInit(meta.map { m => VecInit(m.read(s0_idx, s0_valid).map(_.asTypeOf(new BTBMeta))) }) val s1_req_rebtb = ebtb.read(s0_idx, s0_valid) val s1_req_tag = s1_idx >> log2Ceil(nSets) val s1_resp = Wire(Vec(bankWidth, Valid(UInt(vaddrBitsExtended.W)))) val s1_is_br = Wire(Vec(bankWidth, Bool())) val s1_is_jal = Wire(Vec(bankWidth, Bool())) val s1_hit_ohs = VecInit((0 until bankWidth) map { i => VecInit((0 until nWays) map { w => s1_req_rmeta(w)(i).tag === s1_req_tag(tagSz-1,0) }) }) val s1_hits = s1_hit_ohs.map { oh => oh.reduce(_||_) } val s1_hit_ways = s1_hit_ohs.map { oh => PriorityEncoder(oh) } for (w <- 0 until bankWidth) { val entry_meta = s1_req_rmeta(s1_hit_ways(w))(w) val entry_btb = s1_req_rbtb(s1_hit_ways(w))(w) s1_resp(w).valid := !doing_reset && s1_valid && s1_hits(w) s1_resp(w).bits := Mux( entry_btb.extended, s1_req_rebtb, (s1_pc.asSInt + (w << 1).S + entry_btb.offset).asUInt) s1_is_br(w) := !doing_reset && s1_resp(w).valid && entry_meta.is_br s1_is_jal(w) := !doing_reset && s1_resp(w).valid && !entry_meta.is_br io.resp.f2(w) := io.resp_in(0).f2(w) io.resp.f3(w) := io.resp_in(0).f3(w) when (RegNext(s1_hits(w))) { io.resp.f2(w).predicted_pc := RegNext(s1_resp(w)) io.resp.f2(w).is_br := RegNext(s1_is_br(w)) io.resp.f2(w).is_jal := RegNext(s1_is_jal(w)) when (RegNext(s1_is_jal(w))) { io.resp.f2(w).taken := true.B } } when (RegNext(RegNext(s1_hits(w)))) { 
io.resp.f3(w).predicted_pc := RegNext(io.resp.f2(w).predicted_pc) io.resp.f3(w).is_br := RegNext(io.resp.f2(w).is_br) io.resp.f3(w).is_jal := RegNext(io.resp.f2(w).is_jal) when (RegNext(RegNext(s1_is_jal(w)))) { io.resp.f3(w).taken := true.B } } } val alloc_way = if (nWays > 1) { val r_metas = Cat(VecInit(s1_req_rmeta.map { w => VecInit(w.map(_.tag)) }).asUInt, s1_req_tag(tagSz-1,0)) val l = log2Ceil(nWays) val nChunks = (r_metas.getWidth + l - 1) / l val chunks = (0 until nChunks) map { i => r_metas(min((i+1)*l, r_metas.getWidth)-1, i*l) } chunks.reduce(_^_) } else { 0.U } s1_meta.write_way := Mux(s1_hits.reduce(_||_), PriorityEncoder(s1_hit_ohs.map(_.asUInt).reduce(_|_)), alloc_way) val s1_update_cfi_idx = s1_update.bits.cfi_idx.bits val s1_update_meta = s1_update.bits.meta.asTypeOf(new BTBPredictMeta) val max_offset_value = Cat(0.B, ~(0.U((offsetSz-1).W))).asSInt val min_offset_value = Cat(1.B, (0.U((offsetSz-1).W))).asSInt val new_offset_value = (s1_update.bits.target.asSInt - (s1_update.bits.pc + (s1_update.bits.cfi_idx.bits << 1)).asSInt) val offset_is_extended = (new_offset_value > max_offset_value || new_offset_value < min_offset_value) val s1_update_wbtb_data = Wire(new BTBEntry) s1_update_wbtb_data.extended := offset_is_extended s1_update_wbtb_data.offset := new_offset_value val s1_update_wbtb_mask = (UIntToOH(s1_update_cfi_idx) & Fill(bankWidth, s1_update.bits.cfi_idx.valid && s1_update.valid && s1_update.bits.cfi_taken && s1_update.bits.is_commit_update)) val s1_update_wmeta_mask = ((s1_update_wbtb_mask | s1_update.bits.br_mask) & (Fill(bankWidth, s1_update.valid && s1_update.bits.is_commit_update) | (Fill(bankWidth, s1_update.valid) & s1_update.bits.btb_mispredicts) ) ) val s1_update_wmeta_data = Wire(Vec(bankWidth, new BTBMeta)) for (w <- 0 until bankWidth) { s1_update_wmeta_data(w).tag := Mux(s1_update.bits.btb_mispredicts(w), 0.U, s1_update_idx >> log2Ceil(nSets)) s1_update_wmeta_data(w).is_br := s1_update.bits.br_mask(w) } for (w <- 0 until nWays) { when (doing_reset || s1_update_meta.write_way === w.U || (w == 0 && nWays == 1).B) { btb(w).write( Mux(doing_reset, reset_idx, s1_update_idx), Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 0.U(btbEntrySz.W) }), VecInit(Seq.fill(bankWidth) { s1_update_wbtb_data.asUInt })), Mux(doing_reset, (~(0.U(bankWidth.W))), s1_update_wbtb_mask).asBools ) meta(w).write( Mux(doing_reset, reset_idx, s1_update_idx), Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 0.U(btbMetaSz.W) }), VecInit(s1_update_wmeta_data.map(_.asUInt))), Mux(doing_reset, (~(0.U(bankWidth.W))), s1_update_wmeta_mask).asBools ) } } when (s1_update_wbtb_mask =/= 0.U && offset_is_extended) { ebtb.write(s1_update_idx, s1_update.bits.target) } }
module meta_0_2( // @[btb.scala:65:47] input [6:0] R0_addr, input R0_en, input R0_clk, output [119:0] R0_data, input [6:0] W0_addr, input W0_en, input W0_clk, input [119:0] W0_data, input [3:0] W0_mask ); meta_0_ext meta_0_ext ( // @[btb.scala:65:47] .R0_addr (R0_addr), .R0_en (R0_en), .R0_clk (R0_clk), .R0_data (R0_data), .W0_addr (W0_addr), .W0_en (W0_en), .W0_clk (W0_clk), .W0_data (W0_data), .W0_mask (W0_mask) ); // @[btb.scala:65:47] endmodule
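meta_0_2 above wraps one way's BTB metadata array from btb.scala: with nSets = 128 the address is 7 bits, and bankWidth entries of btbMetaSz bits pack into the 120-bit data word with a per-bank write mask, so this elaboration appears to use bankWidth = 4 and btbMetaSz = 30. The sketch below shows the underlying Chisel idiom, a SyncReadMem of a Vec written with a per-element mask; the BankedMetaMem module and its port names are illustrative, and the default parameters merely mirror the widths inferred above.

import chisel3._
import chisel3.util._

class BankedMetaMem(nSets: Int = 128, bankWidth: Int = 4, metaSz: Int = 30) extends Module {
  val io = IO(new Bundle {
    val ridx  = Input(UInt(log2Ceil(nSets).W))
    val ren   = Input(Bool())
    val rdata = Output(Vec(bankWidth, UInt(metaSz.W)))
    val widx  = Input(UInt(log2Ceil(nSets).W))
    val wdata = Input(Vec(bankWidth, UInt(metaSz.W)))
    val wmask = Input(UInt(bankWidth.W))
  })
  val mem = SyncReadMem(nSets, Vec(bankWidth, UInt(metaSz.W)))
  io.rdata := mem.read(io.ridx, io.ren)            // one-cycle read, like the BTB's s1 lookup
  when (io.wmask.orR) {
    mem.write(io.widx, io.wdata, io.wmask.asBools) // per-bank masked write, like meta(w).write(...)
  }
  // CIRCT lowers this to a memory wrapper with R0_*/W0_* ports and a W0_mask,
  // which is the shape of the meta_0_ext instance above.
}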
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
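Before the generated output, a usage note on the crossing helpers just shown. Below is a minimal, hypothetical producer/consumer pair, each living in its own clock domain and connected through an AsyncBundle via ToAsyncBundle and FromAsyncBundle; the module names, the 8-bit payload, and the use of the default AsyncQueueParams are illustrative assumptions, not part of the original source.

import chisel3._
import chisel3.util._
import freechips.rocketchip.util.{AsyncBundle, AsyncQueueParams, FromAsyncBundle, ToAsyncBundle}

// Hypothetical producer: generates an incrementing byte stream in its own
// clock domain and exposes it as an AsyncBundle (an AsyncQueueSource is
// instantiated internally by ToAsyncBundle).
class AsyncProducer extends Module {
  val io = IO(new Bundle {
    val out = new AsyncBundle(UInt(8.W), AsyncQueueParams())
  })
  val counter = RegInit(0.U(8.W))
  val data    = Wire(Decoupled(UInt(8.W)))
  data.valid := true.B
  data.bits  := counter
  when (data.fire) { counter := counter + 1.U }
  io.out <> ToAsyncBundle(data, AsyncQueueParams())
}

// Hypothetical consumer: converts the AsyncBundle back to a Decoupled
// interface in its own clock domain (FromAsyncBundle instantiates an
// AsyncQueueSink internally).
class AsyncConsumer extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(new AsyncBundle(UInt(8.W), AsyncQueueParams()))
    val deq = Decoupled(UInt(8.W))
  })
  io.deq <> FromAsyncBundle(io.in)
}

At the parent level one would simply connect consumer.io.in <> producer.io.out; the Gray-coded write/read indices and the synchronizer stages inside AsyncQueueSource/AsyncQueueSink are what make that connection safe across unrelated clocks. Both ends must be built with matching AsyncQueueParams (FromAsyncBundle only re-derives the sink-side sync depth).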
module AsyncValidSync_104( // @[AsyncQueue.scala:58:7] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in = 1'h1; // @[ShiftReg.scala:45:23] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_111 io_out_source_valid_0 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
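For readers mapping the generated module above back to its Chisel, the following is a sketch of what the AsyncValidSync wrapper instantiated by AsyncQueueSource/AsyncQueueSink plausibly looks like, reconstructed only from how it is driven in the code shown earlier (explicit clock/reset ports, io.in passed through an async-reset synchronizer chain of params.sync stages). It is an assumption for illustration, not a verbatim copy of the rocket-chip definition, so the class is given a distinct name.

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Sketch (assumed, not verbatim): a RawModule so that clock and reset can be
// driven explicitly by the parent, matching the `.clock :=` / `.reset :=`
// assignments seen in AsyncQueueSource/AsyncQueueSink above.
class AsyncValidSyncSketch(sync: Int, desc: String) extends RawModule {
  val io = IO(new Bundle {
    val in  = Input(Bool())
    val out = Output(Bool())
  })
  val clock = IO(Input(Clock()))
  val reset = IO(Input(AsyncReset()))
  withClockAndReset(clock, reset) {
    // io.in is re-registered through `sync` async-reset flops; in the Verilog
    // above, io_in is constant-folded to 1'h1 because the parent ties it high.
    io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
  }
}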
Generate the Verilog code corresponding to the following Chisel files. File MSHR.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import freechips.rocketchip.tilelink._ import TLPermissions._ import TLMessages._ import MetaData._ import chisel3.PrintableHelper import chisel3.experimental.dataview._ class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val a = Valid(new SourceARequest(params)) val b = Valid(new SourceBRequest(params)) val c = Valid(new SourceCRequest(params)) val d = Valid(new SourceDRequest(params)) val e = Valid(new SourceERequest(params)) val x = Valid(new SourceXRequest(params)) val dir = Valid(new DirectoryWrite(params)) val reload = Bool() // get next request via allocate (if any) } class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val way = UInt(params.wayBits.W) val blockB = Bool() val nestB = Bool() val blockC = Bool() val nestC = Bool() } class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val b_toN = Bool() // nested Probes may unhit us val b_toB = Bool() // nested Probes may demote us val b_clr_dirty = Bool() // nested Probes clear dirty val c_set_dirty = Bool() // nested Releases MAY set dirty } sealed trait CacheState { val code = CacheState.index.U CacheState.index = CacheState.index + 1 } object CacheState { var index = 0 } case object S_INVALID extends CacheState case object S_BRANCH extends CacheState case object S_BRANCH_C extends CacheState case object S_TIP extends CacheState case object S_TIP_C extends CacheState case object S_TIP_CD extends CacheState case object S_TIP_D extends CacheState case object S_TRUNK_C extends CacheState case object S_TRUNK_CD extends CacheState class MSHR(params: InclusiveCacheParameters) extends Module { val io = IO(new Bundle { val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup val status = Valid(new MSHRStatus(params)) val schedule = Decoupled(new ScheduleRequest(params)) val sinkc = Flipped(Valid(new SinkCResponse(params))) val sinkd = Flipped(Valid(new SinkDResponse(params))) val sinke = Flipped(Valid(new SinkEResponse(params))) val nestedwb = Flipped(new NestedWriteback(params)) }) val request_valid = RegInit(false.B) val request = Reg(new FullRequest(params)) val meta_valid = RegInit(false.B) val meta = Reg(new DirectoryResult(params)) // Define which states are valid when (meta_valid) { when (meta.state === INVALID) { assert (!meta.clients.orR) assert (!meta.dirty) } when (meta.state === BRANCH) { assert (!meta.dirty) } when 
(meta.state === TRUNK) { assert (meta.clients.orR) assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one } when (meta.state === TIP) { // noop } } // Completed transitions (s_ = scheduled), (w_ = waiting) val s_rprobe = RegInit(true.B) // B val w_rprobeackfirst = RegInit(true.B) val w_rprobeacklast = RegInit(true.B) val s_release = RegInit(true.B) // CW w_rprobeackfirst val w_releaseack = RegInit(true.B) val s_pprobe = RegInit(true.B) // B val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1] val s_flush = RegInit(true.B) // X w_releaseack val w_grantfirst = RegInit(true.B) val w_grantlast = RegInit(true.B) val w_grant = RegInit(true.B) // first | last depending on wormhole val w_pprobeackfirst = RegInit(true.B) val w_pprobeacklast = RegInit(true.B) val w_pprobeack = RegInit(true.B) // first | last depending on wormhole val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*) val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD val s_execute = RegInit(true.B) // D w_pprobeack, w_grant val w_grantack = RegInit(true.B) val s_writeback = RegInit(true.B) // W w_* // [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall) // However, inB and outC are higher priority than outB, so s_release and s_pprobe // may be safely issued while blockB. Thus we must NOT try to schedule the // potentially stuck s_acquire with either of them (scheduler is all or none). // Meta-data that we discover underway val sink = Reg(UInt(params.outer.bundle.sinkBits.W)) val gotT = Reg(Bool()) val bad_grant = Reg(Bool()) val probes_done = Reg(UInt(params.clientBits.W)) val probes_toN = Reg(UInt(params.clientBits.W)) val probes_noT = Reg(Bool()) // When a nested transaction completes, update our meta data when (meta_valid && meta.state =/= INVALID && io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) { when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B } when (io.nestedwb.c_set_dirty) { meta.dirty := true.B } when (io.nestedwb.b_toB) { meta.state := BRANCH } when (io.nestedwb.b_toN) { meta.hit := false.B } } // Scheduler status io.status.valid := request_valid io.status.bits.set := request.set io.status.bits.tag := request.tag io.status.bits.way := meta.way io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst) io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst // The above rules ensure we will block and not nest an outer probe while still doing our // own inner probes. Thus every probe wakes exactly one MSHR. io.status.bits.blockC := !meta_valid io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst) // The w_grantfirst in nestC is necessary to deal with: // acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock // ... 
this is possible because the release+probe can be for same set, but different tag // We can only demand: block, nest, or queue assert (!io.status.bits.nestB || !io.status.bits.blockB) assert (!io.status.bits.nestC || !io.status.bits.blockC) // Scheduler requests val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe io.schedule.bits.b.valid := !s_rprobe || !s_pprobe io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst) io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant io.schedule.bits.e.valid := !s_grantack && w_grantfirst io.schedule.bits.x.valid := !s_flush && w_releaseack io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait) io.schedule.bits.reload := no_wait io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid || io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid || io.schedule.bits.dir.valid // Schedule completions when (io.schedule.ready) { s_rprobe := true.B when (w_rprobeackfirst) { s_release := true.B } s_pprobe := true.B when (s_release && s_pprobe) { s_acquire := true.B } when (w_releaseack) { s_flush := true.B } when (w_pprobeackfirst) { s_probeack := true.B } when (w_grantfirst) { s_grantack := true.B } when (w_pprobeack && w_grant) { s_execute := true.B } when (no_wait) { s_writeback := true.B } // Await the next operation when (no_wait) { request_valid := false.B meta_valid := false.B } } // Resulting meta-data val final_meta_writeback = WireInit(meta) val req_clientBit = params.clientBit(request.source) val req_needT = needT(request.opcode, request.param) val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm val meta_no_clients = !meta.clients.orR val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT) when (request.prio(2) && (!params.firstLevel).B) { // always a hit final_meta_writeback.dirty := meta.dirty || request.opcode(0) final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state) final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U) final_meta_writeback.hit := true.B // chained requests are hits } .elsewhen (request.control && params.control.B) { // request.prio(0) when (meta.hit) { final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := meta.clients & ~probes_toN } final_meta_writeback.hit := false.B } .otherwise { final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2) final_meta_writeback.state := Mux(req_needT, Mux(req_acquire, TRUNK, TIP), Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH), MuxLookup(meta.state, 0.U(2.W))(Seq( INVALID -> BRANCH, BRANCH -> BRANCH, TRUNK -> TIP, TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP))))) final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) | Mux(req_acquire, req_clientBit, 0.U) final_meta_writeback.tag := request.tag final_meta_writeback.hit := true.B } when (bad_grant) { when (meta.hit) { // upgrade failed (B -> T) assert (!meta_valid || meta.state === BRANCH) final_meta_writeback.hit := true.B final_meta_writeback.dirty := false.B final_meta_writeback.state := BRANCH final_meta_writeback.clients := meta.clients & ~probes_toN } .otherwise { // failed N -> (T or B) 
final_meta_writeback.hit := false.B final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := 0.U } } val invalid = Wire(new DirectoryEntry(params)) invalid.dirty := false.B invalid.state := INVALID invalid.clients := 0.U invalid.tag := 0.U // Just because a client says BtoT, by the time we process the request he may be N. // Therefore, we must consult our own meta-data state to confirm he owns the line still. val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR // The client asking us to act is proof they don't have permissions. val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U) io.schedule.bits.a.bits.tag := request.tag io.schedule.bits.a.bits.set := request.set io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB) io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U || !(request.opcode === PutFullData || request.opcode === AcquirePerm) io.schedule.bits.a.bits.source := 0.U io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB))) io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag) io.schedule.bits.b.bits.set := request.set io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release) io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN) io.schedule.bits.c.bits.source := 0.U io.schedule.bits.c.bits.tag := meta.tag io.schedule.bits.c.bits.set := request.set io.schedule.bits.c.bits.way := meta.way io.schedule.bits.c.bits.dirty := meta.dirty io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param, MuxLookup(request.param, request.param)(Seq( NtoB -> Mux(req_promoteT, NtoT, NtoB), BtoT -> Mux(honour_BtoT, BtoT, NtoT), NtoT -> NtoT))) io.schedule.bits.d.bits.sink := 0.U io.schedule.bits.d.bits.way := meta.way io.schedule.bits.d.bits.bad := bad_grant io.schedule.bits.e.bits.sink := sink io.schedule.bits.x.bits.fail := false.B io.schedule.bits.dir.bits.set := request.set io.schedule.bits.dir.bits.way := meta.way io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback)) // Coverage of state transitions def cacheState(entry: DirectoryEntry, hit: Bool) = { val out = WireDefault(0.U) val c = entry.clients.orR val d = entry.dirty switch (entry.state) { is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) } is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) } is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) } is (INVALID) { out := S_INVALID.code } } when (!hit) { out := S_INVALID.code } out } val p = !params.lastLevel // can be probed val c = !params.firstLevel // can be acquired val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read) val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist val f = params.control // flush control register exists val cfg = (p, c, m, r, f) val b = r || p // can reach branch state (via probe downgrade or read-only device) // The cache must be used for something or we would not be here require(c || m) val evict = cacheState(meta, !meta.hit) val before = cacheState(meta, meta.hit) val after = cacheState(final_meta_writeback, 
true.B) def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}") } else { assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}") } if (cover && f) { params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}") } else { assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}") } } def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}") } else { assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}") } } when ((!s_release && w_rprobeackfirst) && io.schedule.ready) { eviction(S_BRANCH, b) // MMIO read to read-only device eviction(S_BRANCH_C, b && c) // you need children to become C eviction(S_TIP, true) // MMIO read || clean release can lead to this state eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_D, true) // MMIO write || dirty release lead here eviction(S_TRUNK_C, c) // acquire for write eviction(S_TRUNK_CD, c) // dirty release then reacquire } when ((!s_writeback && no_wait) && io.schedule.ready) { transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches transition(S_INVALID, S_TIP, m) // MMIO read transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_INVALID, S_TIP_D, m) // MMIO write transition(S_INVALID, S_TRUNK_C, c) // acquire transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions) transition(S_BRANCH, S_BRANCH_C, b && c) // acquire transition(S_BRANCH, S_TIP, b && m) // prefetch write transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_TIP_D, b && m) // MMIO write transition(S_BRANCH, S_TRUNK_C, b && c) // acquire transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH_C, S_INVALID, b && c && p) transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional) transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_TIP, S_INVALID, p) transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write transition(S_TIP, 
S_TIP_CD, false) // acquire does not make us dirty immediately transition(S_TIP, S_TRUNK_C, c) // acquire transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately transition(S_TIP_C, S_INVALID, c && p) transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional) transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_TIP_C, S_TRUNK_C, c) // acquire transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty transition(S_TIP_D, S_INVALID, p) transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired transition(S_TIP_D, S_TRUNK_CD, c) // acquire transition(S_TIP_CD, S_INVALID, c && p) transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional) transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire transition(S_TIP_CD, S_TRUNK_CD, c) // acquire transition(S_TRUNK_C, S_INVALID, c && p) transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional) transition(S_TRUNK_C, S_TIP_C, c) // bounce shared transition(S_TRUNK_C, S_TIP_D, c) // dirty release transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce transition(S_TRUNK_CD, S_INVALID, c && p) transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TRUNK_CD, S_TIP_D, c) // dirty release transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire } // Handle response messages val probe_bit = params.clientBit(io.sinkc.bits.source) val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client) val probe_toN = isToN(io.sinkc.bits.param) if (!params.firstLevel) when (io.sinkc.valid) { params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B") params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B") // Caution: the probe matches us only in set. // We would never allow an outer probe to nest until both w_[rp]probeack complete, so // it is safe to just unguardedly update the probe FSM. 
probes_done := probes_done | probe_bit probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U) probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT w_rprobeackfirst := w_rprobeackfirst || last_probe w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last) w_pprobeackfirst := w_pprobeackfirst || last_probe w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last) // Allow wormhole routing from sinkC if the first request beat has offset 0 val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U) w_pprobeack := w_pprobeack || set_pprobeack params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data") params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data") // However, meta-data updates need to be done more cautiously when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!! } when (io.sinkd.valid) { when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) { sink := io.sinkd.bits.sink w_grantfirst := true.B w_grantlast := io.sinkd.bits.last // Record if we need to prevent taking ownership bad_grant := io.sinkd.bits.denied // Allow wormhole routing for requests whose first beat has offset 0 w_grant := request.offset === 0.U || io.sinkd.bits.last params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data") params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data") gotT := io.sinkd.bits.param === toT } .elsewhen (io.sinkd.bits.opcode === ReleaseAck) { w_releaseack := true.B } } when (io.sinke.valid) { w_grantack := true.B } // Bootstrap new requests val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits) val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits) val new_request = Mux(io.allocate.valid, allocate_as_full, request) val new_needT = needT(new_request.opcode, new_request.param) val new_clientBit = params.clientBit(new_request.source) val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U) val prior = cacheState(final_meta_writeback, true.B) def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}") } else { assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}") } } when (io.allocate.valid && io.allocate.bits.repeat) { bypass(S_INVALID, f || p) // Can lose permissions (probe/flush) bypass(S_BRANCH, b) // MMIO read to read-only device bypass(S_BRANCH_C, b && c) // you need children to become C bypass(S_TIP, true) // MMIO read || clean release can lead to this state bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client bypass(S_TIP_D, true) // MMIO write || dirty release lead here bypass(S_TRUNK_C, c) // acquire for write bypass(S_TRUNK_CD, c) // dirty release then reacquire } when (io.allocate.valid) { assert (!request_valid || (no_wait && io.schedule.fire)) request_valid := true.B request := io.allocate.bits } // Create execution plan when (io.directory.valid || (io.allocate.valid && 
io.allocate.bits.repeat)) { meta_valid := true.B meta := new_meta probes_done := 0.U probes_toN := 0.U probes_noT := false.B gotT := false.B bad_grant := false.B // These should already be either true or turning true // We clear them here explicitly to simplify the mux tree s_rprobe := true.B w_rprobeackfirst := true.B w_rprobeacklast := true.B s_release := true.B w_releaseack := true.B s_pprobe := true.B s_acquire := true.B s_flush := true.B w_grantfirst := true.B w_grantlast := true.B w_grant := true.B w_pprobeackfirst := true.B w_pprobeacklast := true.B w_pprobeack := true.B s_probeack := true.B s_grantack := true.B s_execute := true.B w_grantack := true.B s_writeback := true.B // For C channel requests (ie: Release[Data]) when (new_request.prio(2) && (!params.firstLevel).B) { s_execute := false.B // Do we need to go dirty? when (new_request.opcode(0) && !new_meta.dirty) { s_writeback := false.B } // Does our state change? when (isToB(new_request.param) && new_meta.state === TRUNK) { s_writeback := false.B } // Do our clients change? when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) { s_writeback := false.B } assert (new_meta.hit) } // For X channel requests (ie: flush) .elsewhen (new_request.control && params.control.B) { // new_request.prio(0) s_flush := false.B // Do we need to actually do something? when (new_meta.hit) { s_release := false.B w_releaseack := false.B // Do we need to shoot-down inner caches? when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) { s_rprobe := false.B w_rprobeackfirst := false.B w_rprobeacklast := false.B } } } // For A channel requests .otherwise { // new_request.prio(0) && !new_request.control s_execute := false.B // Do we need an eviction? when (!new_meta.hit && new_meta.state =/= INVALID) { s_release := false.B w_releaseack := false.B // Do we need to shoot-down inner caches? when ((!params.firstLevel).B & (new_meta.clients =/= 0.U)) { s_rprobe := false.B w_rprobeackfirst := false.B w_rprobeacklast := false.B } } // Do we need an acquire? when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) { s_acquire := false.B w_grantfirst := false.B w_grantlast := false.B w_grant := false.B s_grantack := false.B s_writeback := false.B } // Do we need a probe? when ((!params.firstLevel).B && (new_meta.hit && (new_needT || new_meta.state === TRUNK) && (new_meta.clients & ~new_skipProbe) =/= 0.U)) { s_pprobe := false.B w_pprobeackfirst := false.B w_pprobeacklast := false.B w_pprobeack := false.B s_writeback := false.B } // Do we need a grantack? when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) { w_grantack := false.B s_writeback := false.B } // Becomes dirty? when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) { s_writeback := false.B } } } } File Parameters.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property.cover import scala.math.{min,max} case class CacheParameters( level: Int, ways: Int, sets: Int, blockBytes: Int, beatBytes: Int, // inner hintsSkipProbe: Boolean) { require (ways > 0) require (sets > 0) require (blockBytes > 0 && isPow2(blockBytes)) require (beatBytes > 0 && isPow2(beatBytes)) require (blockBytes >= beatBytes) val blocks = ways * sets val sizeBytes = blocks * blockBytes val blockBeats = blockBytes/beatBytes } case class InclusiveCachePortParameters( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams) { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e)) } object InclusiveCachePortParameters { val none = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.none) val full = InclusiveCachePortParameters( a = BufferParams.default, b = BufferParams.default, c = BufferParams.default, d = BufferParams.default, e = BufferParams.default) // This removes feed-through paths from C=>A and A=>C val fullC = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.default, d = BufferParams.none, e = BufferParams.none) val flowAD = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.flow, e = BufferParams.none) val flowAE = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.flow) // For innerBuf: // SinkA: no restrictions, flows into scheduler+putbuffer // SourceB: no restrictions, flows out of scheduler // sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore // SourceD: no restrictions, flows out of bankedStore/regout // SinkE: no restrictions, flows into scheduler // // ... so while none is possible, you probably want at least flowAC to cut ready // from the scheduler delay and flowD to ease SourceD back-pressure // For outerBufer: // SourceA: must not be pipe, flows out of scheduler // SinkB: no restrictions, flows into scheduler // SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored // SinkD: no restrictions, flows into scheduler & bankedStore // SourceE: must not be pipe, flows out of scheduler // // ... 
AE take the channel ready into the scheduler, so you need at least flowAE } case class InclusiveCacheMicroParameters( writeBytes: Int, // backing store update granularity memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz) portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes dirReg: Boolean = false, innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE { require (writeBytes > 0 && isPow2(writeBytes)) require (memCycles > 0) require (portFactor >= 2) // for inner RMW and concurrent outer Relase + Grant } case class InclusiveCacheControlParameters( address: BigInt, beatBytes: Int, bankedControl: Boolean) case class InclusiveCacheParameters( cache: CacheParameters, micro: InclusiveCacheMicroParameters, control: Boolean, inner: TLEdgeIn, outer: TLEdgeOut)(implicit val p: Parameters) { require (cache.ways > 1) require (cache.sets > 1 && isPow2(cache.sets)) require (micro.writeBytes <= inner.manager.beatBytes) require (micro.writeBytes <= outer.manager.beatBytes) require (inner.manager.beatBytes <= cache.blockBytes) require (outer.manager.beatBytes <= cache.blockBytes) // Require that all cached address ranges have contiguous blocks outer.manager.managers.flatMap(_.address).foreach { a => require (a.alignment >= cache.blockBytes) } // If we are the first level cache, we do not need to support inner-BCE val firstLevel = !inner.client.clients.exists(_.supports.probe) // If we are the last level cache, we do not need to support outer-B val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED) require (lastLevel) // Provision enough resources to achieve full throughput with missing single-beat accesses val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro) val secondary = max(mshrs, micro.memCycles - mshrs) val putLists = micro.memCycles // allow every request to be single beat val putBeats = max(2*cache.blockBeats, micro.memCycles) val relLists = 2 val relBeats = relLists*cache.blockBeats val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address)) val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_)) def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] = if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail) val addressMapping = bitOffsets(pickMask) val addressBits = addressMapping.size // println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}") val allClients = inner.client.clients.size val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size val clientBits = max(1, clientBitsRaw) val stateBits = 2 val wayBits = log2Ceil(cache.ways) val setBits = log2Ceil(cache.sets) val offsetBits = log2Ceil(cache.blockBytes) val tagBits = addressBits - setBits - offsetBits val putBits = log2Ceil(max(putLists, relLists)) require (tagBits > 0) require (offsetBits > 0) val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1 val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1 val innerMaskBits = inner.manager.beatBytes / micro.writeBytes val outerMaskBits = outer.manager.beatBytes / micro.writeBytes def clientBit(source: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse) } } def 
clientSource(bit: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U)) } } def parseAddress(x: UInt): (UInt, UInt, UInt) = { val offset = Cat(addressMapping.map(o => x(o,o)).reverse) val set = offset >> offsetBits val tag = set >> setBits (tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0)) } def widen(x: UInt, width: Int): UInt = { val y = x | 0.U(width.W) assert (y >> width === 0.U) y(width-1, 0) } def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = { val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits)) val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) } addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) } Cat(bits.reverse) } def restoreAddress(expanded: UInt): UInt = { val missingBits = flatAddresses .map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match .groupBy(_._1) .view .mapValues(_.map(_._2)) val muxMask = AddressDecoder(missingBits.values.toList) val mux = missingBits.toList.map { case (bits, addrs) => val widen = addrs.map(_.widen(~muxMask)) val matches = AddressSet .unify(widen.distinct) .map(_.contains(expanded)) .reduce(_ || _) (matches, bits.U) } expanded | Mux1H(mux) } def dirReg[T <: Data](x: T, en: Bool = true.B): T = { if (micro.dirReg) RegEnable(x, en) else x } def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc) } object MetaData { val stateBits = 2 def INVALID: UInt = 0.U(stateBits.W) // way is empty def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch // Does a request need trunk? def needT(opcode: UInt, param: UInt): Bool = { !opcode(2) || (opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) || ((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB) } // Does a request prove the client need not be probed? 
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = { // Acquire(toB) and Get => is N, so no probe // Acquire(*toT) => is N or B, but need T, so no probe // Hint => could be anything, so probe IS needed, if hintsSkipProbe is enabled, skip probe the same client // Put* => is N or B, so probe IS needed opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B) } def isToN(param: UInt): Bool = { param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN } def isToB(param: UInt): Bool = { param === TLPermissions.TtoB || param === TLPermissions.BtoB } } object InclusiveCacheParameters { val lfsrBits = 10 val L2ControlAddress = 0x2010000 val L2ControlSize = 0x1000 def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = { // We need 2-3 normal MSHRs to cover the Directory latency // To fully exploit memory bandwidth-delay-product, we need memCyles/blockBeats MSHRs max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats) } def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = // We need a dedicated MSHR for B+C each 2 + out_mshrs(cache, micro) } class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
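To make the MetaData permission helpers above concrete, here is a small hypothetical wrapper that elaborates needT/isToN/isToB over TileLink opcode and param inputs; the module and port names are invented for illustration and are not part of the original source.

import chisel3._
import sifive.blocks.inclusivecache.MetaData

// Hypothetical probe module: exposes the combinational MetaData helpers so
// their behaviour can be inspected in simulation. For example, Get (opcode 4)
// never needs TRUNK, PutFullData (opcode 0) always does, a PREFETCH_WRITE
// Hint does, and AcquireBlock needs TRUNK exactly when param =/= NtoB.
class MetaDataProbe extends Module {
  val io = IO(new Bundle {
    val opcode = Input(UInt(3.W))
    val param  = Input(UInt(3.W))
    val needT  = Output(Bool())
    val isToN  = Output(Bool())
    val isToB  = Output(Bool())
  })
  io.needT := MetaData.needT(io.opcode, io.param)
  io.isToN := MetaData.isToN(io.param)
  io.isToB := MetaData.isToB(io.param)
}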
module MSHR( // @[MSHR.scala:84:7] input clock, // @[MSHR.scala:84:7] input reset, // @[MSHR.scala:84:7] input io_allocate_valid, // @[MSHR.scala:86:14] input io_allocate_bits_prio_0, // @[MSHR.scala:86:14] input io_allocate_bits_prio_1, // @[MSHR.scala:86:14] input io_allocate_bits_prio_2, // @[MSHR.scala:86:14] input io_allocate_bits_control, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_source, // @[MSHR.scala:86:14] input [12:0] io_allocate_bits_tag, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14] input [9:0] io_allocate_bits_set, // @[MSHR.scala:86:14] input io_allocate_bits_repeat, // @[MSHR.scala:86:14] input io_directory_valid, // @[MSHR.scala:86:14] input io_directory_bits_dirty, // @[MSHR.scala:86:14] input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14] input io_directory_bits_clients, // @[MSHR.scala:86:14] input [12:0] io_directory_bits_tag, // @[MSHR.scala:86:14] input io_directory_bits_hit, // @[MSHR.scala:86:14] input [2:0] io_directory_bits_way, // @[MSHR.scala:86:14] output io_status_valid, // @[MSHR.scala:86:14] output [9:0] io_status_bits_set, // @[MSHR.scala:86:14] output [12:0] io_status_bits_tag, // @[MSHR.scala:86:14] output [2:0] io_status_bits_way, // @[MSHR.scala:86:14] output io_status_bits_blockB, // @[MSHR.scala:86:14] output io_status_bits_nestB, // @[MSHR.scala:86:14] output io_status_bits_blockC, // @[MSHR.scala:86:14] output io_status_bits_nestC, // @[MSHR.scala:86:14] input io_schedule_ready, // @[MSHR.scala:86:14] output io_schedule_valid, // @[MSHR.scala:86:14] output io_schedule_bits_a_valid, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14] output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14] output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14] output io_schedule_bits_c_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14] output io_schedule_bits_d_valid, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_0, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14] output [5:0] 
io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14] output io_schedule_bits_e_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14] output io_schedule_bits_x_valid, // @[MSHR.scala:86:14] output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14] output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14] output io_schedule_bits_reload, // @[MSHR.scala:86:14] input io_sinkd_valid, // @[MSHR.scala:86:14] input io_sinkd_bits_last, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14] input [1:0] io_sinkd_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14] input io_sinkd_bits_denied, // @[MSHR.scala:86:14] input [9:0] io_nestedwb_set, // @[MSHR.scala:86:14] input [12:0] io_nestedwb_tag, // @[MSHR.scala:86:14] input io_nestedwb_b_toN, // @[MSHR.scala:86:14] input io_nestedwb_b_toB, // @[MSHR.scala:86:14] input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14] input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14] ); wire [12:0] final_meta_writeback_tag; // @[MSHR.scala:215:38] wire final_meta_writeback_clients; // @[MSHR.scala:215:38] wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38] wire final_meta_writeback_dirty; // @[MSHR.scala:215:38] wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7] wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_0_0 = io_allocate_bits_prio_0; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7] wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7] wire [12:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7] wire [9:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7] wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7] wire io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7] wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7] wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7] wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7] wire [12:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7] wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7] wire [2:0] 
io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7] wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7] wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7] wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7] wire [1:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7] wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7] wire [9:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7] wire [12:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7] wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7] wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7] wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7] wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7] wire io_schedule_bits_b_valid = 1'h0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7] wire io_sinkc_valid = 1'h0; // @[MSHR.scala:84:7] wire io_sinkc_bits_last = 1'h0; // @[MSHR.scala:84:7] wire io_sinkc_bits_data = 1'h0; // @[MSHR.scala:84:7] wire io_sinke_valid = 1'h0; // @[MSHR.scala:84:7] wire _io_status_bits_blockB_T_2 = 1'h0; // @[MSHR.scala:168:62] wire _io_status_bits_blockB_T_4 = 1'h0; // @[MSHR.scala:168:82] wire _io_status_bits_nestC_T = 1'h0; // @[MSHR.scala:173:43] wire _io_status_bits_nestC_T_1 = 1'h0; // @[MSHR.scala:173:64] wire _io_status_bits_nestC_T_2 = 1'h0; // @[MSHR.scala:173:61] wire _io_schedule_bits_b_valid_T = 1'h0; // @[MSHR.scala:185:31] wire _io_schedule_bits_b_valid_T_1 = 1'h0; // @[MSHR.scala:185:44] wire _io_schedule_bits_b_valid_T_2 = 1'h0; // @[MSHR.scala:185:41] wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68] wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80] wire _final_meta_writeback_clients_T_5 = 1'h0; // @[MSHR.scala:226:56] wire _final_meta_writeback_clients_T_13 = 1'h0; // @[MSHR.scala:246:40] wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21] wire invalid_clients = 1'h0; // @[MSHR.scala:268:21] wire _honour_BtoT_T = 1'h0; // @[MSHR.scala:276:47] wire _honour_BtoT_T_1 = 1'h0; // @[MSHR.scala:276:64] wire honour_BtoT = 1'h0; // @[MSHR.scala:276:30] wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137] wire excluded_client = 1'h0; // @[MSHR.scala:279:28] wire _io_schedule_bits_b_bits_param_T = 1'h0; // @[MSHR.scala:286:42] wire _io_schedule_bits_b_bits_tag_T = 1'h0; // @[MSHR.scala:287:42] wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11] wire _last_probe_T = 1'h0; // @[MSHR.scala:459:33] wire _probe_toN_T = 1'h0; // @[Parameters.scala:282:11] wire _probe_toN_T_1 = 1'h0; // @[Parameters.scala:282:43] wire _probe_toN_T_2 = 1'h0; // @[Parameters.scala:282:34] wire _probe_toN_T_3 = 1'h0; // @[Parameters.scala:282:75] wire probe_toN = 1'h0; // @[Parameters.scala:282:66] wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137] wire new_skipProbe = 1'h0; // @[MSHR.scala:509:26] wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11] wire _final_meta_writeback_clients_T_6 = 1'h1; // @[MSHR.scala:226:52] wire _final_meta_writeback_clients_T_8 = 1'h1; // @[MSHR.scala:232:54] wire _final_meta_writeback_clients_T_10 = 1'h1; // @[MSHR.scala:245:66] wire _final_meta_writeback_clients_T_15 = 1'h1; // 
@[MSHR.scala:258:54] wire _io_schedule_bits_b_bits_clients_T = 1'h1; // @[MSHR.scala:289:53] wire _last_probe_T_1 = 1'h1; // @[MSHR.scala:459:66] wire [1:0] io_schedule_bits_a_bits_source = 2'h0; // @[MSHR.scala:84:7] wire [1:0] io_schedule_bits_c_bits_source = 2'h0; // @[MSHR.scala:84:7] wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21] wire [2:0] io_schedule_bits_d_bits_sink = 3'h0; // @[MSHR.scala:84:7] wire [2:0] io_sinkc_bits_param = 3'h0; // @[MSHR.scala:84:7] wire [2:0] io_sinke_bits_sink = 3'h0; // @[MSHR.scala:84:7] wire [9:0] io_sinkc_bits_set = 10'h0; // @[MSHR.scala:84:7] wire [12:0] io_sinkc_bits_tag = 13'h0; // @[MSHR.scala:84:7] wire [12:0] invalid_tag = 13'h0; // @[MSHR.scala:268:21] wire [5:0] io_sinkc_bits_source = 6'h0; // @[MSHR.scala:84:7] wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70] wire [1:0] _io_schedule_bits_d_bits_param_T_2 = 2'h1; // @[MSHR.scala:301:53] wire allocate_as_full_prio_0 = io_allocate_bits_prio_0_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_opcode = io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34] wire [12:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34] wire [9:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34] wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40] wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93] wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28] wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39] wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105] wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55] wire _io_schedule_valid_T = io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7, :192:49] wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91] wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41] wire [12:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41] wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51] wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64] wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41] wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41] wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57] wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41] wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43] wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40] wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66] wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41] wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41] wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41] wire [12:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41] wire no_wait; 
// @[MSHR.scala:183:83] wire [9:0] io_status_bits_set_0; // @[MSHR.scala:84:7] wire [12:0] io_status_bits_tag_0; // @[MSHR.scala:84:7] wire [2:0] io_status_bits_way_0; // @[MSHR.scala:84:7] wire io_status_bits_blockB_0; // @[MSHR.scala:84:7] wire io_status_bits_nestB_0; // @[MSHR.scala:84:7] wire io_status_bits_blockC_0; // @[MSHR.scala:84:7] wire io_status_bits_nestC_0; // @[MSHR.scala:84:7] wire io_status_valid_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7] wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_0_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7] wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7] wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7] wire io_schedule_valid_0; // @[MSHR.scala:84:7] reg request_valid; // @[MSHR.scala:97:30] assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30] reg request_prio_0; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_0_0 = request_prio_0; // @[MSHR.scala:84:7, :98:20] reg request_prio_1; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20] reg request_prio_2; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_2_0 = 
request_prio_2; // @[MSHR.scala:84:7, :98:20] reg request_control; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_opcode; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_param; // @[MSHR.scala:98:20] reg [2:0] request_size; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_source; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20] reg [12:0] request_tag; // @[MSHR.scala:98:20] assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign _io_schedule_bits_b_bits_tag_T_1 = request_tag; // @[MSHR.scala:98:20, :287:41] reg [5:0] request_offset; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_put; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20] reg [9:0] request_set; // @[MSHR.scala:98:20] assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] reg meta_valid; // @[MSHR.scala:99:27] reg meta_dirty; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17] reg [1:0] meta_state; // @[MSHR.scala:100:17] reg meta_clients; // @[MSHR.scala:100:17] wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39] wire _final_meta_writeback_clients_T_7 = meta_clients; // @[MSHR.scala:100:17, :226:50] wire _final_meta_writeback_clients_T_9 = meta_clients; // @[MSHR.scala:100:17, :232:52] wire _final_meta_writeback_clients_T_11 = meta_clients; // @[MSHR.scala:100:17, :245:64] wire _final_meta_writeback_clients_T_16 = meta_clients; // @[MSHR.scala:100:17, :258:52] assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients; // @[MSHR.scala:100:17, :289:51] wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27] wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27] wire _last_probe_T_2 = meta_clients; // @[MSHR.scala:100:17, :459:64] reg [12:0] meta_tag; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17] reg meta_hit; // @[MSHR.scala:100:17] reg [2:0] meta_way; // @[MSHR.scala:100:17] assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] wire [2:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38] reg s_release; // @[MSHR.scala:124:33] reg w_releaseack; // @[MSHR.scala:125:33] wire _no_wait_T = w_releaseack; // @[MSHR.scala:125:33, :183:33] reg s_acquire; // @[MSHR.scala:127:33] 
reg s_flush; // @[MSHR.scala:128:33] reg w_grantfirst; // @[MSHR.scala:129:33] reg w_grantlast; // @[MSHR.scala:130:33] reg w_grant; // @[MSHR.scala:131:33] reg s_grantack; // @[MSHR.scala:136:33] reg s_execute; // @[MSHR.scala:137:33] reg w_grantack; // @[MSHR.scala:138:33] reg s_writeback; // @[MSHR.scala:139:33] reg [2:0] sink; // @[MSHR.scala:147:17] assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17] reg gotT; // @[MSHR.scala:148:17] reg bad_grant; // @[MSHR.scala:149:22] assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22] wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28] wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45] wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1; // @[MSHR.scala:168:{45,59}] wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3; // @[MSHR.scala:168:{59,79}] wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103] wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}] assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // @[MSHR.scala:168:{28,40,100}] assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40] wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39] wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T; // @[MSHR.scala:169:{39,55}] wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1; // @[MSHR.scala:169:{55,74}] wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96] assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}] assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93] assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28] assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28] wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85] wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{82,85}] assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}] assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39] wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}] wire _no_wait_T_2 = _no_wait_T_1; // @[MSHR.scala:183:{49,64}] assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}] assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83] wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31] wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}] assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1; // @[MSHR.scala:184:{42,55}] assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55] wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32] wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T; // @[MSHR.scala:186:{32,43}] assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}] assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, 
:186:64] wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31] wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T; // @[MSHR.scala:187:{31,42}] assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // @[MSHR.scala:131:33, :187:{42,57}] assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57] wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31] assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}] assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43] wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31] assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}] assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40] wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34] wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T; // @[MSHR.scala:190:{34,45}] wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70] wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}] assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}] assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66] wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}] wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}] wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49] wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}] assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}] assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105] wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71] wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71] wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71] wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire [12:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71] wire final_meta_writeback_hit; // @[MSHR.scala:215:38] wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12] wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12] wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _req_needT_T_2; // @[Parameters.scala:270:13] assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13] wire _excluded_client_T_6; // @[Parameters.scala:279:117] assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117] wire _GEN_0 = 
request_param == 3'h1; // @[Parameters.scala:270:42] wire _req_needT_T_3; // @[Parameters.scala:270:42] assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42] wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11] assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11] wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42] wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _req_needT_T_6; // @[Parameters.scala:271:14] assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14] wire _req_acquire_T; // @[MSHR.scala:219:36] assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14] wire _excluded_client_T_1; // @[Parameters.scala:279:12] assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12] wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52] wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89] wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52] wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}] wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}] wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81] wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}] wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}] wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}] wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65] wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}] wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55] wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78] wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78] assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78] wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70] wire _evict_T_2; // @[MSHR.scala:317:26] assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _before_T_1; // @[MSHR.scala:317:26] assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}] wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 
2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}] wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43] assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43] wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}] wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75] wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}] wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45] wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}] wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}] wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40] wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40] assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40] wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65] assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65] wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41] wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}] wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72] wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}] wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70] wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70] wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53] assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53] wire _evict_T_1; // @[MSHR.scala:317:26] assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire _before_T; // @[MSHR.scala:317:26] assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70] wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70] wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55] wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? _final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70] wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70] wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}] wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12; // @[MSHR.scala:245:{40,84}] assign final_meta_writeback_tag = request_control ? 
meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :228:53, :247:30] assign final_meta_writeback_hit = bad_grant ? meta_hit : ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :228:53, :234:30, :248:30, :251:20, :252:21] assign final_meta_writeback_dirty = ~bad_grant & (request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21] assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36] assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_control ? (meta_hit ? _final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36] wire _excluded_client_T = meta_hit & request_prio_0; // @[MSHR.scala:98:20, :100:17, :279:38] wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50] wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}] wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}] wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}] wire _excluded_client_T_9 = _excluded_client_T & _excluded_client_T_8; // @[Parameters.scala:279:106] wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56] wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? _io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70] assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}] wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51] wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55] wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52] wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}] wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}] assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38] assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91] wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70] wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? 
request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}] assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,61}] assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41] assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41] assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51] assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41] assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41] assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}] assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41] wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42] wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53] wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89] wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53] wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? 3'h1 : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79] wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79] assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41] wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42] assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 
13'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}] assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41] wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32] wire [3:0] evict; // @[MSHR.scala:314:26] wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32] wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32] wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32] assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32] assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39] wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39] assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39] assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76] wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76] assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76] assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32] assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] before_0; // @[MSHR.scala:314:26] wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32] wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _before_out_T_4 = before_c ? _before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11] assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? 
{1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] after; // @[MSHR.scala:314:26] wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26] wire _after_T; // @[MSHR.scala:317:26] assign _after_T = _GEN_9; // @[MSHR.scala:317:26] wire _prior_T; // @[MSHR.scala:317:26] assign _prior_T = _GEN_9; // @[MSHR.scala:317:26] wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32] wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26] wire _after_T_1; // @[MSHR.scala:317:26] assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire _prior_T_1; // @[MSHR.scala:317:26] assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32] wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32] assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32] assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39] wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39] assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39] assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76] wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76] assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76] assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26] wire _after_T_3; // @[MSHR.scala:317:26] assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26] wire _prior_T_3; // @[MSHR.scala:317:26] assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26] assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? {1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire last_probe = ~_last_probe_T_2; // @[MSHR.scala:459:{46,64}] wire _w_grant_T = request_offset == 6'h0; // @[MSHR.scala:98:20, :490:33] wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}] wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35] wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40] wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [12:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_hit = _new_meta_T ? 
final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [2:0] new_meta_way = _new_meta_T ? final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_request_prio_0 = io_allocate_valid_0 ? allocate_as_full_prio_0 : request_prio_0; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [12:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [9:0] new_request_set = io_allocate_valid_0 ? 
allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12] wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN_15 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _new_needT_T_2; // @[Parameters.scala:270:13] assign _new_needT_T_2 = _GEN_15; // @[Parameters.scala:270:13] wire _new_skipProbe_T_5; // @[Parameters.scala:279:117] assign _new_skipProbe_T_5 = _GEN_15; // @[Parameters.scala:270:13, :279:117] wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42] wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _T_711 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _new_needT_T_6; // @[Parameters.scala:271:14] assign _new_needT_T_6 = _T_711; // @[Parameters.scala:271:14] wire _new_skipProbe_T; // @[Parameters.scala:279:12] assign _new_skipProbe_T = _T_711; // @[Parameters.scala:271:14, :279:12] wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52] wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89] wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50] wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}] wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}] wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}] wire [3:0] prior; // @[MSHR.scala:314:26] wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32] wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
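Two conventions make the flattened MSHR netlist above easier to read. First, the s_*/w_* registers form a scoreboard: a channel's schedule valid is asserted once the action is pending (its s_ flag is clear) and the responses it depends on have arrived (the relevant w_ flags are set), e.g. io_schedule_bits_e_valid = ~s_grantack & w_grantfirst above. Second, the 2-bit meta_state comparisons correspond to the MetaData encoding defined in the Parameters.scala listing that follows (INVALID=0, BRANCH=1, TRUNK=2, TIP=3). The Chisel sketch below restates both idioms with illustrative names; it is a reading aid, not the module's actual source.

import chisel3._

// Illustrative decode of the 2-bit directory state used by the MSHR above.
// Encoding per MetaData in the Parameters.scala listing that follows:
// INVALID=0, BRANCH=1, TRUNK=2, TIP=3.
object MetaStateSketch {
  def isInvalid(state: UInt): Bool = !state.orR    // appears above as ~(|meta_state)
  def isBranch (state: UInt): Bool = state === 1.U // appears above as meta_state == 2'h1
  def isTrunk  (state: UInt): Bool = state === 2.U // appears above as meta_state == 2'h2
  def isTip    (state: UInt): Bool = state.andR    // appears above as &meta_state
}

// Hypothetical fragment of the schedule/wait scoreboard idiom: offer an action
// once it is pending (s_* clear) and its prerequisites have arrived (w_* set).
class ScoreboardSketch extends Module {
  val io = IO(new Bundle {
    val s_grantack   = Input(Bool())
    val w_grantfirst = Input(Bool())
    val e_valid      = Output(Bool())
  })
  io.e_valid := !io.s_grantack && io.w_grantfirst // cf. io_schedule_bits_e_valid above
}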
Generate the Verilog code corresponding to the following Chisel files. File Parameters.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property.cover import scala.math.{min,max} case class CacheParameters( level: Int, ways: Int, sets: Int, blockBytes: Int, beatBytes: Int, // inner hintsSkipProbe: Boolean) { require (ways > 0) require (sets > 0) require (blockBytes > 0 && isPow2(blockBytes)) require (beatBytes > 0 && isPow2(beatBytes)) require (blockBytes >= beatBytes) val blocks = ways * sets val sizeBytes = blocks * blockBytes val blockBeats = blockBytes/beatBytes } case class InclusiveCachePortParameters( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams) { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e)) } object InclusiveCachePortParameters { val none = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.none) val full = InclusiveCachePortParameters( a = BufferParams.default, b = BufferParams.default, c = BufferParams.default, d = BufferParams.default, e = BufferParams.default) // This removes feed-through paths from C=>A and A=>C val fullC = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.default, d = BufferParams.none, e = BufferParams.none) val flowAD = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.flow, e = BufferParams.none) val flowAE = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.flow) // For innerBuf: // SinkA: no restrictions, flows into scheduler+putbuffer // SourceB: no restrictions, flows out of scheduler // sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore // SourceD: no restrictions, flows out of bankedStore/regout // SinkE: no restrictions, flows into scheduler // // ... so while none is possible, you probably want at least flowAC to cut ready // from the scheduler delay and flowD to ease SourceD back-pressure // For outerBufer: // SourceA: must not be pipe, flows out of scheduler // SinkB: no restrictions, flows into scheduler // SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored // SinkD: no restrictions, flows into scheduler & bankedStore // SourceE: must not be pipe, flows out of scheduler // // ... 
AE take the channel ready into the scheduler, so you need at least flowAE } case class InclusiveCacheMicroParameters( writeBytes: Int, // backing store update granularity memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz) portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes dirReg: Boolean = false, innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE { require (writeBytes > 0 && isPow2(writeBytes)) require (memCycles > 0) require (portFactor >= 2) // for inner RMW and concurrent outer Relase + Grant } case class InclusiveCacheControlParameters( address: BigInt, beatBytes: Int, bankedControl: Boolean) case class InclusiveCacheParameters( cache: CacheParameters, micro: InclusiveCacheMicroParameters, control: Boolean, inner: TLEdgeIn, outer: TLEdgeOut)(implicit val p: Parameters) { require (cache.ways > 1) require (cache.sets > 1 && isPow2(cache.sets)) require (micro.writeBytes <= inner.manager.beatBytes) require (micro.writeBytes <= outer.manager.beatBytes) require (inner.manager.beatBytes <= cache.blockBytes) require (outer.manager.beatBytes <= cache.blockBytes) // Require that all cached address ranges have contiguous blocks outer.manager.managers.flatMap(_.address).foreach { a => require (a.alignment >= cache.blockBytes) } // If we are the first level cache, we do not need to support inner-BCE val firstLevel = !inner.client.clients.exists(_.supports.probe) // If we are the last level cache, we do not need to support outer-B val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED) require (lastLevel) // Provision enough resources to achieve full throughput with missing single-beat accesses val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro) val secondary = max(mshrs, micro.memCycles - mshrs) val putLists = micro.memCycles // allow every request to be single beat val putBeats = max(2*cache.blockBeats, micro.memCycles) val relLists = 2 val relBeats = relLists*cache.blockBeats val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address)) val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_)) def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] = if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail) val addressMapping = bitOffsets(pickMask) val addressBits = addressMapping.size // println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}") val allClients = inner.client.clients.size val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size val clientBits = max(1, clientBitsRaw) val stateBits = 2 val wayBits = log2Ceil(cache.ways) val setBits = log2Ceil(cache.sets) val offsetBits = log2Ceil(cache.blockBytes) val tagBits = addressBits - setBits - offsetBits val putBits = log2Ceil(max(putLists, relLists)) require (tagBits > 0) require (offsetBits > 0) val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1 val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1 val innerMaskBits = inner.manager.beatBytes / micro.writeBytes val outerMaskBits = outer.manager.beatBytes / micro.writeBytes def clientBit(source: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse) } } def 
clientSource(bit: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U)) } } def parseAddress(x: UInt): (UInt, UInt, UInt) = { val offset = Cat(addressMapping.map(o => x(o,o)).reverse) val set = offset >> offsetBits val tag = set >> setBits (tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0)) } def widen(x: UInt, width: Int): UInt = { val y = x | 0.U(width.W) assert (y >> width === 0.U) y(width-1, 0) } def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = { val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits)) val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) } addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) } Cat(bits.reverse) } def restoreAddress(expanded: UInt): UInt = { val missingBits = flatAddresses .map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match .groupBy(_._1) .view .mapValues(_.map(_._2)) val muxMask = AddressDecoder(missingBits.values.toList) val mux = missingBits.toList.map { case (bits, addrs) => val widen = addrs.map(_.widen(~muxMask)) val matches = AddressSet .unify(widen.distinct) .map(_.contains(expanded)) .reduce(_ || _) (matches, bits.U) } expanded | Mux1H(mux) } def dirReg[T <: Data](x: T, en: Bool = true.B): T = { if (micro.dirReg) RegEnable(x, en) else x } def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc) } object MetaData { val stateBits = 2 def INVALID: UInt = 0.U(stateBits.W) // way is empty def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch // Does a request need trunk? def needT(opcode: UInt, param: UInt): Bool = { !opcode(2) || (opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) || ((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB) } // Does a request prove the client need not be probed? 
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = { // Acquire(toB) and Get => is N, so no probe // Acquire(*toT) => is N or B, but need T, so no probe // Hint => could be anything, so probe IS needed, if hintsSkipProbe is enabled, skip probe the same client // Put* => is N or B, so probe IS needed opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B) } def isToN(param: UInt): Bool = { param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN } def isToB(param: UInt): Bool = { param === TLPermissions.TtoB || param === TLPermissions.BtoB } } object InclusiveCacheParameters { val lfsrBits = 10 val L2ControlAddress = 0x2010000 val L2ControlSize = 0x1000 def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = { // We need 2-3 normal MSHRs to cover the Directory latency // To fully exploit memory bandwidth-delay-product, we need memCyles/blockBeats MSHRs max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats) } def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = // We need a dedicated MSHR for B+C each 2 + out_mshrs(cache, micro) } class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle File SourceC.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import freechips.rocketchip.tilelink._ class SourceCRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val opcode = UInt(3.W) val param = UInt(3.W) val source = UInt(params.outer.bundle.sourceBits.W) val tag = UInt(params.tagBits.W) val set = UInt(params.setBits.W) val way = UInt(params.wayBits.W) val dirty = Bool() } class SourceC(params: InclusiveCacheParameters) extends Module { val io = IO(new Bundle { val req = Flipped(Decoupled(new SourceCRequest(params))) val c = Decoupled(new TLBundleC(params.outer.bundle)) // BankedStore port val bs_adr = Decoupled(new BankedStoreOuterAddress(params)) val bs_dat = Flipped(new BankedStoreOuterDecoded(params)) // RaW hazard val evict_req = new SourceDHazard(params) val evict_safe = Flipped(Bool()) }) // We ignore the depth and pipe is useless here (we have to provision for worst-case=stall) require (!params.micro.outerBuf.c.pipe) val beatBytes = params.outer.manager.beatBytes val beats = params.cache.blockBytes / beatBytes val flow = params.micro.outerBuf.c.flow val queue = Module(new Queue(chiselTypeOf(io.c.bits), beats + 3 + (if (flow) 0 else 1), flow = flow)) // queue.io.count is far too slow val fillBits = log2Up(beats + 4) val fill = RegInit(0.U(fillBits.W)) val room = RegInit(true.B) when (queue.io.enq.fire =/= queue.io.deq.fire) { fill := fill + Mux(queue.io.enq.fire, 1.U, ~0.U(fillBits.W)) room := fill === 0.U || ((fill === 1.U || fill === 2.U) && !queue.io.enq.fire) } assert (room === queue.io.count <= 1.U) val busy = RegInit(false.B) val beat = RegInit(0.U(params.outerBeatBits.W)) val last = if (params.cache.blockBytes == params.outer.manager.beatBytes) true.B else (beat === ~(0.U(params.outerBeatBits.W))) val req = Mux(!busy, io.req.bits, RegEnable(io.req.bits, !busy && io.req.valid)) val want_data = busy || (io.req.valid && room && io.req.bits.dirty) io.req.ready := !busy && room io.evict_req.set := req.set io.evict_req.way := req.way io.bs_adr.valid := (beat.orR || io.evict_safe) && want_data io.bs_adr.bits.noop := false.B io.bs_adr.bits.way := req.way io.bs_adr.bits.set := req.set io.bs_adr.bits.beat := beat io.bs_adr.bits.mask := ~0.U(params.outerMaskBits.W) params.ccover(io.req.valid && io.req.bits.dirty && room && !io.evict_safe, "SOURCEC_HAZARD", "Prevented Eviction data hazard with backpressure") params.ccover(io.bs_adr.valid && !io.bs_adr.ready, "SOURCEC_SRAM_STALL", "Data SRAM busy") when (io.req.valid && room && io.req.bits.dirty) { busy := true.B } when (io.bs_adr.fire) { beat := beat + 1.U when (last) { busy := false.B beat := 0.U } } val s2_latch = Mux(want_data, io.bs_adr.fire, io.req.fire) val s2_valid = RegNext(s2_latch) val s2_req = RegEnable(req, s2_latch) val s2_beat = RegEnable(beat, s2_latch) val s2_last = RegEnable(last, s2_latch) val s3_latch = s2_valid val s3_valid = RegNext(s3_latch) val s3_req = RegEnable(s2_req, s3_latch) val s3_beat = RegEnable(s2_beat, s3_latch) val s3_last = RegEnable(s2_last, s3_latch) val c = Wire(chiselTypeOf(io.c)) c.valid := s3_valid c.bits.opcode := s3_req.opcode c.bits.param := s3_req.param c.bits.size := params.offsetBits.U c.bits.source := s3_req.source c.bits.address := params.expandAddress(s3_req.tag, s3_req.set, 0.U) c.bits.data := io.bs_dat.data c.bits.corrupt := false.B // We never accept at the front-end unless we're sure things will fit assert(!c.valid || c.ready) params.ccover(!c.ready, "SOURCEC_QUEUE_FULL", "Eviction queue fully utilized") queue.io.enq <> c io.c <> 
queue.io.deq
}
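Before the generated module, it is worth checking the sizing arithmetic that the SourceC source above performs. A small plain-Scala sketch follows; the concrete values are read off the port widths of the Verilog instance below (64-bit outer data bus, 3-bit beat index, size tied to 3'h6), and the flow setting is an assumption (the default outerBuf, i.e. BufferParams.default with flow disabled).

// Worked sizing example for the SourceC instance below.
object SourceCSizingSketch extends App {
  def log2Up(x: Int): Int = math.max(1, (math.log(x.toDouble) / math.log(2.0)).ceil.toInt)

  val blockBytes = 64  // c_bits_size is tied to 3'h6, i.e. 2^6-byte blocks
  val beatBytes  = 8   // io_c_bits_data / io_bs_dat_data are 64 bits wide
  val beats      = blockBytes / beatBytes            // = 8, matches the 3-bit io_bs_adr_bits_beat
  val flow       = false                             // assumed: outerBuf.c = BufferParams.default
  val queueDepth = beats + 3 + (if (flow) 0 else 1)  // = 12 entries in the staging queue
  val fillBits   = log2Up(beats + 4)                 // = 4, matches reg [3:0] fill

  println(s"beats=$beats queueDepth=$queueDepth fillBits=$fillBits")
}

With these numbers the staging queue holds a full block plus slack, which is why a 4-bit fill counter (and the queue's 4-bit count port) suffices, and why room can be tracked with the simple fill == 0/1/2 comparison rather than the "far too slow" queue.io.count noted in the source.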
module SourceC( // @[SourceC.scala:35:7] input clock, // @[SourceC.scala:35:7] input reset, // @[SourceC.scala:35:7] output io_req_ready, // @[SourceC.scala:37:14] input io_req_valid, // @[SourceC.scala:37:14] input [2:0] io_req_bits_opcode, // @[SourceC.scala:37:14] input [2:0] io_req_bits_param, // @[SourceC.scala:37:14] input [3:0] io_req_bits_source, // @[SourceC.scala:37:14] input [8:0] io_req_bits_tag, // @[SourceC.scala:37:14] input [10:0] io_req_bits_set, // @[SourceC.scala:37:14] input [3:0] io_req_bits_way, // @[SourceC.scala:37:14] input io_req_bits_dirty, // @[SourceC.scala:37:14] input io_c_ready, // @[SourceC.scala:37:14] output io_c_valid, // @[SourceC.scala:37:14] output [2:0] io_c_bits_opcode, // @[SourceC.scala:37:14] output [2:0] io_c_bits_param, // @[SourceC.scala:37:14] output [2:0] io_c_bits_size, // @[SourceC.scala:37:14] output [3:0] io_c_bits_source, // @[SourceC.scala:37:14] output [31:0] io_c_bits_address, // @[SourceC.scala:37:14] output [63:0] io_c_bits_data, // @[SourceC.scala:37:14] output io_c_bits_corrupt, // @[SourceC.scala:37:14] input io_bs_adr_ready, // @[SourceC.scala:37:14] output io_bs_adr_valid, // @[SourceC.scala:37:14] output [3:0] io_bs_adr_bits_way, // @[SourceC.scala:37:14] output [10:0] io_bs_adr_bits_set, // @[SourceC.scala:37:14] output [2:0] io_bs_adr_bits_beat, // @[SourceC.scala:37:14] input [63:0] io_bs_dat_data, // @[SourceC.scala:37:14] output [10:0] io_evict_req_set, // @[SourceC.scala:37:14] output [3:0] io_evict_req_way, // @[SourceC.scala:37:14] input io_evict_safe // @[SourceC.scala:37:14] ); wire _queue_io_enq_ready; // @[SourceC.scala:54:21] wire _queue_io_deq_valid; // @[SourceC.scala:54:21] wire [3:0] _queue_io_count; // @[SourceC.scala:54:21] wire io_req_valid_0 = io_req_valid; // @[SourceC.scala:35:7] wire [2:0] io_req_bits_opcode_0 = io_req_bits_opcode; // @[SourceC.scala:35:7] wire [2:0] io_req_bits_param_0 = io_req_bits_param; // @[SourceC.scala:35:7] wire [3:0] io_req_bits_source_0 = io_req_bits_source; // @[SourceC.scala:35:7] wire [8:0] io_req_bits_tag_0 = io_req_bits_tag; // @[SourceC.scala:35:7] wire [10:0] io_req_bits_set_0 = io_req_bits_set; // @[SourceC.scala:35:7] wire [3:0] io_req_bits_way_0 = io_req_bits_way; // @[SourceC.scala:35:7] wire io_req_bits_dirty_0 = io_req_bits_dirty; // @[SourceC.scala:35:7] wire io_c_ready_0 = io_c_ready; // @[SourceC.scala:35:7] wire io_bs_adr_ready_0 = io_bs_adr_ready; // @[SourceC.scala:35:7] wire [63:0] io_bs_dat_data_0 = io_bs_dat_data; // @[SourceC.scala:35:7] wire io_evict_safe_0 = io_evict_safe; // @[SourceC.scala:35:7] wire _c_bits_address_base_T_2 = reset; // @[Parameters.scala:222:12] wire _c_bits_address_base_T_8 = reset; // @[Parameters.scala:222:12] wire _c_bits_address_base_T_14 = reset; // @[Parameters.scala:222:12] wire io_bs_adr_bits_noop = 1'h0; // @[SourceC.scala:35:7] wire c_bits_corrupt = 1'h0; // @[SourceC.scala:108:15] wire _c_bits_address_base_T = 1'h0; // @[Parameters.scala:222:15] wire _c_bits_address_base_T_4 = 1'h0; // @[Parameters.scala:222:12] wire _c_bits_address_base_T_6 = 1'h0; // @[Parameters.scala:222:15] wire _c_bits_address_base_T_10 = 1'h0; // @[Parameters.scala:222:12] wire _c_bits_address_base_T_12 = 1'h0; // @[Parameters.scala:222:15] wire _c_bits_address_base_T_16 = 1'h0; // @[Parameters.scala:222:12] wire io_bs_adr_bits_mask = 1'h1; // @[SourceC.scala:35:7] wire _io_bs_adr_bits_mask_T = 1'h1; // @[SourceC.scala:82:26] wire _c_bits_address_base_T_1 = 1'h1; // @[Parameters.scala:222:24] wire _c_bits_address_base_T_7 = 1'h1; // 
@[Parameters.scala:222:24] wire _c_bits_address_base_T_13 = 1'h1; // @[Parameters.scala:222:24] wire [1:0] c_bits_address_lo_lo_hi_hi = 2'h0; // @[Parameters.scala:230:8] wire [1:0] c_bits_address_hi_hi_hi_lo = 2'h0; // @[Parameters.scala:230:8] wire [5:0] c_bits_address_base_y_2 = 6'h0; // @[Parameters.scala:221:15] wire [5:0] _c_bits_address_base_T_17 = 6'h0; // @[Parameters.scala:223:6] wire [2:0] c_bits_size = 3'h6; // @[SourceC.scala:108:15] wire [2:0] _last_T = 3'h7; // @[SourceC.scala:68:99] wire [3:0] _fill_T_1 = 4'hF; // @[SourceC.scala:61:48] wire _io_req_ready_T_1; // @[SourceC.scala:72:25] wire _io_bs_adr_valid_T_2; // @[SourceC.scala:77:50] wire [3:0] req_way; // @[SourceC.scala:69:17] wire [10:0] req_set; // @[SourceC.scala:69:17] wire [63:0] c_bits_data = io_bs_dat_data_0; // @[SourceC.scala:35:7, :108:15] wire io_req_ready_0; // @[SourceC.scala:35:7] wire [2:0] io_c_bits_opcode_0; // @[SourceC.scala:35:7] wire [2:0] io_c_bits_param_0; // @[SourceC.scala:35:7] wire [2:0] io_c_bits_size_0; // @[SourceC.scala:35:7] wire [3:0] io_c_bits_source_0; // @[SourceC.scala:35:7] wire [31:0] io_c_bits_address_0; // @[SourceC.scala:35:7] wire [63:0] io_c_bits_data_0; // @[SourceC.scala:35:7] wire io_c_bits_corrupt_0; // @[SourceC.scala:35:7] wire io_c_valid_0; // @[SourceC.scala:35:7] wire [3:0] io_bs_adr_bits_way_0; // @[SourceC.scala:35:7] wire [10:0] io_bs_adr_bits_set_0; // @[SourceC.scala:35:7] wire [2:0] io_bs_adr_bits_beat_0; // @[SourceC.scala:35:7] wire io_bs_adr_valid_0; // @[SourceC.scala:35:7] wire [10:0] io_evict_req_set_0; // @[SourceC.scala:35:7] wire [3:0] io_evict_req_way_0; // @[SourceC.scala:35:7] reg [3:0] fill; // @[SourceC.scala:58:21] reg room; // @[SourceC.scala:59:21] wire c_valid; // @[SourceC.scala:108:15] wire _T = _queue_io_enq_ready & c_valid; // @[Decoupled.scala:51:35] wire _fill_T; // @[Decoupled.scala:51:35] assign _fill_T = _T; // @[Decoupled.scala:51:35] wire _room_T_4; // @[Decoupled.scala:51:35] assign _room_T_4 = _T; // @[Decoupled.scala:51:35] wire [3:0] _fill_T_2 = _fill_T ? 4'h1 : 4'hF; // @[Decoupled.scala:51:35] wire [4:0] _fill_T_3 = {1'h0, fill} + {1'h0, _fill_T_2}; // @[SourceC.scala:58:21, :61:{18,23}] wire [3:0] _fill_T_4 = _fill_T_3[3:0]; // @[SourceC.scala:61:18] wire _room_T = fill == 4'h0; // @[SourceC.scala:58:21, :62:18] wire _room_T_1 = fill == 4'h1; // @[SourceC.scala:58:21, :62:36] wire _room_T_2 = fill == 4'h2; // @[SourceC.scala:58:21, :62:52] wire _room_T_3 = _room_T_1 | _room_T_2; // @[SourceC.scala:62:{36,44,52}] wire _room_T_5 = ~_room_T_4; // @[Decoupled.scala:51:35] wire _room_T_6 = _room_T_3 & _room_T_5; // @[SourceC.scala:62:{44,61,64}] wire _room_T_7 = _room_T | _room_T_6; // @[SourceC.scala:62:{18,26,61}] reg busy; // @[SourceC.scala:66:21] reg [2:0] beat; // @[SourceC.scala:67:21] assign io_bs_adr_bits_beat_0 = beat; // @[SourceC.scala:35:7, :67:21] wire last = &beat; // @[SourceC.scala:67:21, :68:95] wire _req_T = ~busy; // @[SourceC.scala:66:21, :69:18] wire _req_T_1 = ~busy; // @[SourceC.scala:66:21, :69:{18,61}] wire _req_T_2 = _req_T_1 & io_req_valid_0; // @[SourceC.scala:35:7, :69:{61,67}] reg [2:0] req_r_opcode; // @[SourceC.scala:69:47] reg [2:0] req_r_param; // @[SourceC.scala:69:47] reg [3:0] req_r_source; // @[SourceC.scala:69:47] reg [8:0] req_r_tag; // @[SourceC.scala:69:47] reg [10:0] req_r_set; // @[SourceC.scala:69:47] reg [3:0] req_r_way; // @[SourceC.scala:69:47] reg req_r_dirty; // @[SourceC.scala:69:47] wire [2:0] req_opcode = _req_T ? 
io_req_bits_opcode_0 : req_r_opcode; // @[SourceC.scala:35:7, :69:{17,18,47}] wire [2:0] req_param = _req_T ? io_req_bits_param_0 : req_r_param; // @[SourceC.scala:35:7, :69:{17,18,47}] wire [3:0] req_source = _req_T ? io_req_bits_source_0 : req_r_source; // @[SourceC.scala:35:7, :69:{17,18,47}] wire [8:0] req_tag = _req_T ? io_req_bits_tag_0 : req_r_tag; // @[SourceC.scala:35:7, :69:{17,18,47}] assign req_set = _req_T ? io_req_bits_set_0 : req_r_set; // @[SourceC.scala:35:7, :69:{17,18,47}] assign req_way = _req_T ? io_req_bits_way_0 : req_r_way; // @[SourceC.scala:35:7, :69:{17,18,47}] wire req_dirty = _req_T ? io_req_bits_dirty_0 : req_r_dirty; // @[SourceC.scala:35:7, :69:{17,18,47}] assign io_bs_adr_bits_set_0 = req_set; // @[SourceC.scala:35:7, :69:17] assign io_evict_req_set_0 = req_set; // @[SourceC.scala:35:7, :69:17] assign io_bs_adr_bits_way_0 = req_way; // @[SourceC.scala:35:7, :69:17] assign io_evict_req_way_0 = req_way; // @[SourceC.scala:35:7, :69:17] wire _want_data_T = io_req_valid_0 & room; // @[SourceC.scala:35:7, :59:21, :70:41] wire _want_data_T_1 = _want_data_T & io_req_bits_dirty_0; // @[SourceC.scala:35:7, :70:{41,49}] wire want_data = busy | _want_data_T_1; // @[SourceC.scala:66:21, :70:{24,49}] wire _io_req_ready_T = ~busy; // @[SourceC.scala:66:21, :69:18, :72:19] assign _io_req_ready_T_1 = _io_req_ready_T & room; // @[SourceC.scala:59:21, :72:{19,25}] assign io_req_ready_0 = _io_req_ready_T_1; // @[SourceC.scala:35:7, :72:25] wire _io_bs_adr_valid_T = |beat; // @[SourceC.scala:67:21, :77:28] wire _io_bs_adr_valid_T_1 = _io_bs_adr_valid_T | io_evict_safe_0; // @[SourceC.scala:35:7, :77:{28,32}] assign _io_bs_adr_valid_T_2 = _io_bs_adr_valid_T_1 & want_data; // @[SourceC.scala:70:24, :77:{32,50}] assign io_bs_adr_valid_0 = _io_bs_adr_valid_T_2; // @[SourceC.scala:35:7, :77:50] wire _s2_latch_T = io_bs_adr_ready_0 & io_bs_adr_valid_0; // @[Decoupled.scala:51:35] wire [3:0] _beat_T = {1'h0, beat} + 4'h1; // @[SourceC.scala:67:21, :89:18] wire [2:0] _beat_T_1 = _beat_T[2:0]; // @[SourceC.scala:89:18] wire _s2_latch_T_1 = io_req_ready_0 & io_req_valid_0; // @[Decoupled.scala:51:35] wire s2_latch = want_data ? 
_s2_latch_T : _s2_latch_T_1; // @[Decoupled.scala:51:35] reg s2_valid; // @[SourceC.scala:97:25] reg [2:0] s2_req_opcode; // @[SourceC.scala:98:25] reg [2:0] s2_req_param; // @[SourceC.scala:98:25] reg [3:0] s2_req_source; // @[SourceC.scala:98:25] reg [8:0] s2_req_tag; // @[SourceC.scala:98:25] reg [10:0] s2_req_set; // @[SourceC.scala:98:25] reg [3:0] s2_req_way; // @[SourceC.scala:98:25] reg s2_req_dirty; // @[SourceC.scala:98:25] reg [2:0] s2_beat; // @[SourceC.scala:99:26] reg s2_last; // @[SourceC.scala:100:26] reg s3_valid; // @[SourceC.scala:103:25] assign c_valid = s3_valid; // @[SourceC.scala:103:25, :108:15] reg [2:0] s3_req_opcode; // @[SourceC.scala:104:25] wire [2:0] c_bits_opcode = s3_req_opcode; // @[SourceC.scala:104:25, :108:15] reg [2:0] s3_req_param; // @[SourceC.scala:104:25] wire [2:0] c_bits_param = s3_req_param; // @[SourceC.scala:104:25, :108:15] reg [3:0] s3_req_source; // @[SourceC.scala:104:25] wire [3:0] c_bits_source = s3_req_source; // @[SourceC.scala:104:25, :108:15] reg [8:0] s3_req_tag; // @[SourceC.scala:104:25] wire [8:0] c_bits_address_base_y = s3_req_tag; // @[SourceC.scala:104:25] reg [10:0] s3_req_set; // @[SourceC.scala:104:25] wire [10:0] c_bits_address_base_y_1 = s3_req_set; // @[SourceC.scala:104:25] reg [3:0] s3_req_way; // @[SourceC.scala:104:25] reg s3_req_dirty; // @[SourceC.scala:104:25] reg [2:0] s3_beat; // @[SourceC.scala:105:26] reg s3_last; // @[SourceC.scala:106:26] wire [31:0] _c_bits_address_T_26; // @[Parameters.scala:230:8] wire [31:0] c_bits_address; // @[SourceC.scala:108:15] wire c_ready; // @[SourceC.scala:108:15] wire [8:0] _c_bits_address_base_T_5 = c_bits_address_base_y; // @[Parameters.scala:221:15, :223:6] wire _c_bits_address_base_T_3 = ~_c_bits_address_base_T_2; // @[Parameters.scala:222:12] wire [10:0] _c_bits_address_base_T_11 = c_bits_address_base_y_1; // @[Parameters.scala:221:15, :223:6] wire _c_bits_address_base_T_9 = ~_c_bits_address_base_T_8; // @[Parameters.scala:222:12] wire _c_bits_address_base_T_15 = ~_c_bits_address_base_T_14; // @[Parameters.scala:222:12] wire [19:0] c_bits_address_base_hi = {_c_bits_address_base_T_5, _c_bits_address_base_T_11}; // @[Parameters.scala:223:6, :227:19] wire [25:0] c_bits_address_base = {c_bits_address_base_hi, 6'h0}; // @[Parameters.scala:227:19] wire _c_bits_address_T = c_bits_address_base[0]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_1 = c_bits_address_base[1]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_2 = c_bits_address_base[2]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_3 = c_bits_address_base[3]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_4 = c_bits_address_base[4]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_5 = c_bits_address_base[5]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_6 = c_bits_address_base[6]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_7 = c_bits_address_base[7]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_8 = c_bits_address_base[8]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_9 = c_bits_address_base[9]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_10 = c_bits_address_base[10]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_11 = c_bits_address_base[11]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_12 = c_bits_address_base[12]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_13 = c_bits_address_base[13]; // 
@[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_14 = c_bits_address_base[14]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_15 = c_bits_address_base[15]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_16 = c_bits_address_base[16]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_17 = c_bits_address_base[17]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_18 = c_bits_address_base[18]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_19 = c_bits_address_base[19]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_20 = c_bits_address_base[20]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_21 = c_bits_address_base[21]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_22 = c_bits_address_base[22]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_23 = c_bits_address_base[23]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_24 = c_bits_address_base[24]; // @[Parameters.scala:227:19, :229:72] wire _c_bits_address_T_25 = c_bits_address_base[25]; // @[Parameters.scala:227:19, :229:72] wire [1:0] c_bits_address_lo_lo_lo_lo = {_c_bits_address_T_1, _c_bits_address_T}; // @[Parameters.scala:229:72, :230:8] wire [1:0] c_bits_address_lo_lo_lo_hi = {_c_bits_address_T_3, _c_bits_address_T_2}; // @[Parameters.scala:229:72, :230:8] wire [3:0] c_bits_address_lo_lo_lo = {c_bits_address_lo_lo_lo_hi, c_bits_address_lo_lo_lo_lo}; // @[Parameters.scala:230:8] wire [1:0] c_bits_address_lo_lo_hi_lo = {_c_bits_address_T_5, _c_bits_address_T_4}; // @[Parameters.scala:229:72, :230:8] wire [3:0] c_bits_address_lo_lo_hi = {2'h0, c_bits_address_lo_lo_hi_lo}; // @[Parameters.scala:230:8] wire [7:0] c_bits_address_lo_lo = {c_bits_address_lo_lo_hi, c_bits_address_lo_lo_lo}; // @[Parameters.scala:230:8] wire [1:0] c_bits_address_lo_hi_lo_lo = {_c_bits_address_T_6, 1'h0}; // @[Parameters.scala:229:72, :230:8] wire [1:0] c_bits_address_lo_hi_lo_hi = {_c_bits_address_T_8, _c_bits_address_T_7}; // @[Parameters.scala:229:72, :230:8] wire [3:0] c_bits_address_lo_hi_lo = {c_bits_address_lo_hi_lo_hi, c_bits_address_lo_hi_lo_lo}; // @[Parameters.scala:230:8] wire [1:0] c_bits_address_lo_hi_hi_lo = {_c_bits_address_T_10, _c_bits_address_T_9}; // @[Parameters.scala:229:72, :230:8] wire [1:0] c_bits_address_lo_hi_hi_hi = {_c_bits_address_T_12, _c_bits_address_T_11}; // @[Parameters.scala:229:72, :230:8] wire [3:0] c_bits_address_lo_hi_hi = {c_bits_address_lo_hi_hi_hi, c_bits_address_lo_hi_hi_lo}; // @[Parameters.scala:230:8] wire [7:0] c_bits_address_lo_hi = {c_bits_address_lo_hi_hi, c_bits_address_lo_hi_lo}; // @[Parameters.scala:230:8] wire [15:0] c_bits_address_lo = {c_bits_address_lo_hi, c_bits_address_lo_lo}; // @[Parameters.scala:230:8] wire [1:0] c_bits_address_hi_lo_lo_lo = {_c_bits_address_T_14, _c_bits_address_T_13}; // @[Parameters.scala:229:72, :230:8] wire [1:0] c_bits_address_hi_lo_lo_hi = {_c_bits_address_T_16, _c_bits_address_T_15}; // @[Parameters.scala:229:72, :230:8] wire [3:0] c_bits_address_hi_lo_lo = {c_bits_address_hi_lo_lo_hi, c_bits_address_hi_lo_lo_lo}; // @[Parameters.scala:230:8] wire [1:0] c_bits_address_hi_lo_hi_lo = {_c_bits_address_T_18, _c_bits_address_T_17}; // @[Parameters.scala:229:72, :230:8] wire [1:0] c_bits_address_hi_lo_hi_hi = {_c_bits_address_T_20, _c_bits_address_T_19}; // @[Parameters.scala:229:72, :230:8] wire [3:0] c_bits_address_hi_lo_hi = {c_bits_address_hi_lo_hi_hi, c_bits_address_hi_lo_hi_lo}; // @[Parameters.scala:230:8] wire [7:0] 
c_bits_address_hi_lo = {c_bits_address_hi_lo_hi, c_bits_address_hi_lo_lo}; // @[Parameters.scala:230:8] wire [1:0] c_bits_address_hi_hi_lo_lo = {_c_bits_address_T_22, _c_bits_address_T_21}; // @[Parameters.scala:229:72, :230:8] wire [1:0] c_bits_address_hi_hi_lo_hi = {_c_bits_address_T_24, _c_bits_address_T_23}; // @[Parameters.scala:229:72, :230:8] wire [3:0] c_bits_address_hi_hi_lo = {c_bits_address_hi_hi_lo_hi, c_bits_address_hi_hi_lo_lo}; // @[Parameters.scala:230:8] wire [1:0] c_bits_address_hi_hi_hi_hi = {_c_bits_address_T_25, 1'h0}; // @[Parameters.scala:229:72, :230:8] wire [3:0] c_bits_address_hi_hi_hi = {c_bits_address_hi_hi_hi_hi, 2'h0}; // @[Parameters.scala:230:8] wire [7:0] c_bits_address_hi_hi = {c_bits_address_hi_hi_hi, c_bits_address_hi_hi_lo}; // @[Parameters.scala:230:8] wire [15:0] c_bits_address_hi = {c_bits_address_hi_hi, c_bits_address_hi_lo}; // @[Parameters.scala:230:8] assign _c_bits_address_T_26 = {c_bits_address_hi, c_bits_address_lo}; // @[Parameters.scala:230:8] assign c_bits_address = _c_bits_address_T_26; // @[SourceC.scala:108:15]
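The long run of c_bits_address_* wires above is the bit-blasted form of a simple address reconstruction: the stored 9-bit tag and 11-bit set are concatenated, shifted up by the 6-bit block offset, and expanded to the 32-bit address with zeros inserted at the bit positions the address map fixes at zero. A minimal Chisel sketch of the idea, with the widths taken from the generated code; this is an illustration only (it omits the zero-insertion step), not the actual SourceC.scala source:

import chisel3._
import chisel3.util.Cat

class RebuildAddressSketch extends Module {
  val io = IO(new Bundle {
    val tag     = Input(UInt(9.W))
    val set     = Input(UInt(11.W))
    val address = Output(UInt(32.W))
  })
  // (tag ## set) << offsetBits, zero-extended to the full address width. The generated
  // Verilog above additionally redistributes these bits around positions that the
  // address map pins to zero.
  io.address := Cat(io.tag, io.set, 0.U(6.W)).pad(32)
}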
Generate the Verilog code corresponding to the following Chisel files. File RoundAnyRawFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util.Fill import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundAnyRawFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int, options: Int ) extends RawModule { override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(inExpWidth, inSigWidth)) // (allowed exponent range has limits) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0) val effectiveInSigWidth = if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1 val neverUnderflows = ((options & (flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact) ) != 0) || (inExpWidth < outExpWidth) val neverOverflows = ((options & flRoundOpt_neverOverflows) != 0) || (inExpWidth < outExpWidth) val outNaNExp = BigInt(7)<<(outExpWidth - 2) val outInfExp = BigInt(6)<<(outExpWidth - 2) val outMaxFiniteExp = outInfExp - 1 val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2 val outMinNonzeroExp = outMinNormExp - outSigWidth + 1 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundingMode_near_even = (io.roundingMode === round_near_even) val roundingMode_minMag = (io.roundingMode === round_minMag) val roundingMode_min = (io.roundingMode === round_min) val roundingMode_max = (io.roundingMode === round_max) val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag) val roundingMode_odd = (io.roundingMode === round_odd) val roundMagUp = (roundingMode_min && io.in.sign) || (roundingMode_max && ! 
io.in.sign) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sAdjustedExp = if (inExpWidth < outExpWidth) (io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S )(outExpWidth, 0).zext else if (inExpWidth == outExpWidth) io.in.sExp else io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S val adjustedSig = if (inSigWidth <= outSigWidth + 2) io.in.sig<<(outSigWidth - inSigWidth + 2) else (io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ## io.in.sig(inSigWidth - outSigWidth - 2, 0).orR ) val doShiftSigDown1 = if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2) val common_expOut = Wire(UInt((outExpWidth + 1).W)) val common_fractOut = Wire(UInt((outSigWidth - 1).W)) val common_overflow = Wire(Bool()) val common_totalUnderflow = Wire(Bool()) val common_underflow = Wire(Bool()) val common_inexact = Wire(Bool()) if ( neverOverflows && neverUnderflows && (effectiveInSigWidth <= outSigWidth) ) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1 common_fractOut := Mux(doShiftSigDown1, adjustedSig(outSigWidth + 1, 3), adjustedSig(outSigWidth, 2) ) common_overflow := false.B common_totalUnderflow := false.B common_underflow := false.B common_inexact := false.B } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundMask = if (neverUnderflows) 0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W) else (lowMask( sAdjustedExp(outExpWidth, 0), outMinNormExp - outSigWidth - 1, outMinNormExp ) | doShiftSigDown1) ## 3.U(2.W) val shiftedRoundMask = 0.U(1.W) ## roundMask>>1 val roundPosMask = ~shiftedRoundMask & roundMask val roundPosBit = (adjustedSig & roundPosMask).orR val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR val anyRound = roundPosBit || anyRoundExtra val roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && roundPosBit) || (roundMagUp && anyRound) val roundedSig: Bits = Mux(roundIncr, (((adjustedSig | roundMask)>>2) +& 1.U) & ~Mux(roundingMode_near_even && roundPosBit && ! anyRoundExtra, roundMask>>1, 0.U((outSigWidth + 2).W) ), (adjustedSig & ~roundMask)>>2 | Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U) ) //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? 
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext common_expOut := sRoundedExp(outExpWidth, 0) common_fractOut := Mux(doShiftSigDown1, roundedSig(outSigWidth - 1, 1), roundedSig(outSigWidth - 2, 0) ) common_overflow := (if (neverOverflows) false.B else //*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?: (sRoundedExp>>(outExpWidth - 1) >= 3.S)) common_totalUnderflow := (if (neverUnderflows) false.B else //*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?: (sRoundedExp < outMinNonzeroExp.S)) val unboundedRange_roundPosBit = Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1)) val unboundedRange_anyRound = (doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR val unboundedRange_roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && unboundedRange_roundPosBit) || (roundMagUp && unboundedRange_anyRound) val roundCarry = Mux(doShiftSigDown1, roundedSig(outSigWidth + 1), roundedSig(outSigWidth) ) common_underflow := (if (neverUnderflows) false.B else common_totalUnderflow || //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? (anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) && Mux(doShiftSigDown1, roundMask(3), roundMask(2)) && ! ((io.detectTininess === tininess_afterRounding) && ! Mux(doShiftSigDown1, roundMask(4), roundMask(3) ) && roundCarry && roundPosBit && unboundedRange_roundIncr))) common_inexact := common_totalUnderflow || anyRound } //------------------------------------------------------------------------ //------------------------------------------------------------------------ val isNaNOut = io.invalidExc || io.in.isNaN val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero val overflow = commonCase && common_overflow val underflow = commonCase && common_underflow val inexact = overflow || (commonCase && common_inexact) val overflow_roundMagUp = roundingMode_near_even || roundingMode_near_maxMag || roundMagUp val pegMinNonzeroMagOut = commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd) val pegMaxFiniteMagOut = overflow && ! 
overflow_roundMagUp val notNaN_isInfOut = notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp) val signOut = Mux(isNaNOut, false.B, io.in.sign) val expOut = (common_expOut & ~Mux(io.in.isZero || common_totalUnderflow, (BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMinNonzeroMagOut, ~outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMaxFiniteMagOut, (BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W), 0.U ) & ~Mux(notNaN_isInfOut, (BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U )) | Mux(pegMinNonzeroMagOut, outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) | Mux(pegMaxFiniteMagOut, outMaxFiniteExp.U((outExpWidth + 1).W), 0.U ) | Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) | Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U) val fractOut = Mux(isNaNOut || io.in.isZero || common_totalUnderflow, Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U), common_fractOut ) | Fill(outSigWidth - 1, pegMaxFiniteMagOut) io.out := signOut ## expOut ## fractOut io.exceptionFlags := io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int) extends RawModule { override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(expWidth, sigWidth + 2)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( expWidth, sigWidth + 2, expWidth, sigWidth, options)) roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc roundAnyRawFNToRecFN.io.in := io.in roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags }
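One detail worth isolating from the module above is the round-increment decision. A standalone sketch (not part of the hardfloat source) of the same expression: the round-to-nearest modes bump the result only when the first discarded bit (roundPosBit) is set, while directed rounding toward the larger magnitude bumps whenever anything at all was discarded.

import chisel3._

object RoundIncrSketch {
  // Mirrors the 'roundIncr' expression in RoundAnyRawFNToRecFN.
  def roundIncr(nearEven: Bool, nearMaxMag: Bool, roundMagUp: Bool,
                roundPosBit: Bool, anyRound: Bool): Bool =
    ((nearEven || nearMaxMag) && roundPosBit) || (roundMagUp && anyRound)
}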
module RoundAnyRawFNToRecFN_ie7_is64_oe11_os53_7( // @[RoundAnyRawFNToRecFN.scala:48:5] input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16] input [8:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16] input [64:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16] input [2:0] io_roundingMode, // @[RoundAnyRawFNToRecFN.scala:58:16] output [64:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16] output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16] ); wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [8:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [64:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [2:0] io_roundingMode_0 = io_roundingMode; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [53:0] _roundMask_T = 54'h0; // @[RoundAnyRawFNToRecFN.scala:153:36] wire [11:0] _expOut_T_4 = 12'hC31; // @[RoundAnyRawFNToRecFN.scala:258:19] wire [55:0] roundMask = 56'h3; // @[RoundAnyRawFNToRecFN.scala:153:55] wire [56:0] _shiftedRoundMask_T = 57'h3; // @[RoundAnyRawFNToRecFN.scala:162:41] wire [55:0] shiftedRoundMask = 56'h1; // @[RoundAnyRawFNToRecFN.scala:162:53] wire [55:0] _roundPosMask_T = 56'hFFFFFFFFFFFFFE; // @[RoundAnyRawFNToRecFN.scala:163:28] wire [55:0] roundPosMask = 56'h2; // @[RoundAnyRawFNToRecFN.scala:163:46] wire [55:0] _roundedSig_T_10 = 56'hFFFFFFFFFFFFFC; // @[RoundAnyRawFNToRecFN.scala:180:32] wire [54:0] _roundedSig_T_6 = 55'h1; // @[RoundAnyRawFNToRecFN.scala:177:35, :181:67] wire [54:0] _roundedSig_T_14 = 55'h1; // @[RoundAnyRawFNToRecFN.scala:177:35, :181:67] wire [11:0] _expOut_T_6 = 12'hFFF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14, :265:14] wire [11:0] _expOut_T_9 = 12'hFFF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14, :265:14] wire [11:0] _expOut_T_12 = 12'hFFF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14, :265:14] wire [11:0] _expOut_T_5 = 12'h0; // @[RoundAnyRawFNToRecFN.scala:257:18] wire [11:0] _expOut_T_8 = 12'h0; // @[RoundAnyRawFNToRecFN.scala:261:18] wire [11:0] _expOut_T_11 = 12'h0; // @[RoundAnyRawFNToRecFN.scala:265:18] wire [11:0] _expOut_T_14 = 12'h0; // @[RoundAnyRawFNToRecFN.scala:269:16] wire [11:0] _expOut_T_16 = 12'h0; // @[RoundAnyRawFNToRecFN.scala:273:16] wire [11:0] _expOut_T_18 = 12'h0; // @[RoundAnyRawFNToRecFN.scala:277:16] wire [11:0] _expOut_T_20 = 12'h0; // @[RoundAnyRawFNToRecFN.scala:278:16] wire [51:0] _fractOut_T_2 = 52'h0; // @[RoundAnyRawFNToRecFN.scala:281:16, :284:13] wire [51:0] _fractOut_T_4 = 52'h0; // @[RoundAnyRawFNToRecFN.scala:281:16, :284:13] wire [1:0] _io_exceptionFlags_T = 2'h0; // @[RoundAnyRawFNToRecFN.scala:288:23] wire [2:0] _io_exceptionFlags_T_1 = 3'h0; // @[RoundAnyRawFNToRecFN.scala:288:41] wire [3:0] _io_exceptionFlags_T_2 = 4'h0; // @[RoundAnyRawFNToRecFN.scala:288:53] wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5] wire _commonCase_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:237:22] wire _commonCase_T_1 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:237:36] wire _commonCase_T_2 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:237:33] wire io_invalidExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isNaN = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isInf = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire common_overflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:124:37] wire common_totalUnderflow 
= 1'h0; // @[RoundAnyRawFNToRecFN.scala:125:37] wire common_underflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:126:37] wire _unboundedRange_anyRound_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:205:30] wire isNaNOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:235:34] wire notNaN_isSpecialInfOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:236:49] wire overflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:238:32] wire underflow = 1'h0; // @[RoundAnyRawFNToRecFN.scala:239:32] wire _pegMinNonzeroMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:20] wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45] wire pegMaxFiniteMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:39] wire _notNaN_isInfOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:248:45] wire notNaN_isInfOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:248:32] wire _expOut_T = io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :253:32] wire _fractOut_T = io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :280:22] wire signOut = io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :250:22] wire [64:0] _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:286:33] wire [4:0] _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:288:66] wire [64:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire roundingMode_near_even = io_roundingMode_0 == 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :90:53, :288:41] wire roundingMode_minMag = io_roundingMode_0 == 3'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :91:53] wire roundingMode_min = io_roundingMode_0 == 3'h2; // @[RoundAnyRawFNToRecFN.scala:48:5, :92:53] wire roundingMode_max = io_roundingMode_0 == 3'h3; // @[RoundAnyRawFNToRecFN.scala:48:5, :93:53] wire roundingMode_near_maxMag = io_roundingMode_0 == 3'h4; // @[RoundAnyRawFNToRecFN.scala:48:5, :94:53] wire roundingMode_odd = io_roundingMode_0 == 3'h6; // @[RoundAnyRawFNToRecFN.scala:48:5, :95:53] wire _roundMagUp_T = roundingMode_min & io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :92:53, :98:27] wire _roundMagUp_T_1 = ~io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :98:66] wire _roundMagUp_T_2 = roundingMode_max & _roundMagUp_T_1; // @[RoundAnyRawFNToRecFN.scala:93:53, :98:{63,66}] wire roundMagUp = _roundMagUp_T | _roundMagUp_T_2; // @[RoundAnyRawFNToRecFN.scala:98:{27,42,63}] wire [12:0] _sAdjustedExp_T = {{4{io_in_sExp_0[8]}}, io_in_sExp_0} + 13'h780; // @[RoundAnyRawFNToRecFN.scala:48:5, :104:25] wire [11:0] _sAdjustedExp_T_1 = _sAdjustedExp_T[11:0]; // @[RoundAnyRawFNToRecFN.scala:104:25, :106:14] wire [12:0] sAdjustedExp = {1'h0, _sAdjustedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:106:{14,31}] wire [54:0] _adjustedSig_T = io_in_sig_0[64:10]; // @[RoundAnyRawFNToRecFN.scala:48:5, :116:23] wire [9:0] _adjustedSig_T_1 = io_in_sig_0[9:0]; // @[RoundAnyRawFNToRecFN.scala:48:5, :117:26] wire _adjustedSig_T_2 = |_adjustedSig_T_1; // @[RoundAnyRawFNToRecFN.scala:117:{26,60}] wire [55:0] adjustedSig = {_adjustedSig_T, _adjustedSig_T_2}; // @[RoundAnyRawFNToRecFN.scala:116:{23,66}, :117:60] wire [11:0] _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:187:37] wire [11:0] common_expOut; // @[RoundAnyRawFNToRecFN.scala:122:31] wire [51:0] _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:189:16] wire [51:0] common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31] wire _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:230:49] wire common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37] wire [55:0] _roundPosBit_T = adjustedSig & 56'h2; // 
@[RoundAnyRawFNToRecFN.scala:116:66, :163:46, :164:40] wire roundPosBit = |_roundPosBit_T; // @[RoundAnyRawFNToRecFN.scala:164:{40,56}] wire [55:0] _anyRoundExtra_T = adjustedSig & 56'h1; // @[RoundAnyRawFNToRecFN.scala:116:66, :162:53, :165:42] wire anyRoundExtra = |_anyRoundExtra_T; // @[RoundAnyRawFNToRecFN.scala:165:{42,62}] wire anyRound = roundPosBit | anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:164:56, :165:62, :166:36] assign _common_inexact_T = anyRound; // @[RoundAnyRawFNToRecFN.scala:166:36, :230:49] wire _GEN = roundingMode_near_even | roundingMode_near_maxMag; // @[RoundAnyRawFNToRecFN.scala:90:53, :94:53, :169:38] wire _roundIncr_T; // @[RoundAnyRawFNToRecFN.scala:169:38] assign _roundIncr_T = _GEN; // @[RoundAnyRawFNToRecFN.scala:169:38] wire _unboundedRange_roundIncr_T; // @[RoundAnyRawFNToRecFN.scala:207:38] assign _unboundedRange_roundIncr_T = _GEN; // @[RoundAnyRawFNToRecFN.scala:169:38, :207:38] wire _overflow_roundMagUp_T; // @[RoundAnyRawFNToRecFN.scala:243:32] assign _overflow_roundMagUp_T = _GEN; // @[RoundAnyRawFNToRecFN.scala:169:38, :243:32] wire _roundIncr_T_1 = _roundIncr_T & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :169:{38,67}] wire _roundIncr_T_2 = roundMagUp & anyRound; // @[RoundAnyRawFNToRecFN.scala:98:42, :166:36, :171:29] wire roundIncr = _roundIncr_T_1 | _roundIncr_T_2; // @[RoundAnyRawFNToRecFN.scala:169:67, :170:31, :171:29] wire [55:0] _roundedSig_T = adjustedSig | 56'h3; // @[RoundAnyRawFNToRecFN.scala:116:66, :153:55, :174:32] wire [53:0] _roundedSig_T_1 = _roundedSig_T[55:2]; // @[RoundAnyRawFNToRecFN.scala:174:{32,44}] wire [54:0] _roundedSig_T_2 = {1'h0, _roundedSig_T_1} + 55'h1; // @[RoundAnyRawFNToRecFN.scala:174:{44,49}, :177:35, :181:67] wire _roundedSig_T_3 = roundingMode_near_even & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:90:53, :164:56, :175:49] wire _roundedSig_T_4 = ~anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:165:62, :176:30] wire _roundedSig_T_5 = _roundedSig_T_3 & _roundedSig_T_4; // @[RoundAnyRawFNToRecFN.scala:175:{49,64}, :176:30] wire [54:0] _roundedSig_T_7 = {54'h0, _roundedSig_T_5}; // @[RoundAnyRawFNToRecFN.scala:175:{25,64}] wire [54:0] _roundedSig_T_8 = ~_roundedSig_T_7; // @[RoundAnyRawFNToRecFN.scala:175:{21,25}] wire [54:0] _roundedSig_T_9 = _roundedSig_T_2 & _roundedSig_T_8; // @[RoundAnyRawFNToRecFN.scala:174:{49,57}, :175:21] wire [55:0] _roundedSig_T_11 = adjustedSig & 56'hFFFFFFFFFFFFFC; // @[RoundAnyRawFNToRecFN.scala:116:66, :180:{30,32}] wire [53:0] _roundedSig_T_12 = _roundedSig_T_11[55:2]; // @[RoundAnyRawFNToRecFN.scala:180:{30,43}] wire _roundedSig_T_13 = roundingMode_odd & anyRound; // @[RoundAnyRawFNToRecFN.scala:95:53, :166:36, :181:42] wire [54:0] _roundedSig_T_15 = {54'h0, _roundedSig_T_13}; // @[RoundAnyRawFNToRecFN.scala:181:{24,42}] wire [54:0] _roundedSig_T_16 = {1'h0, _roundedSig_T_12} | _roundedSig_T_15; // @[RoundAnyRawFNToRecFN.scala:180:{43,47}, :181:24] wire [54:0] roundedSig = roundIncr ? 
_roundedSig_T_9 : _roundedSig_T_16; // @[RoundAnyRawFNToRecFN.scala:170:31, :173:16, :174:57, :180:47] wire [1:0] _sRoundedExp_T = roundedSig[54:53]; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:54] wire [2:0] _sRoundedExp_T_1 = {1'h0, _sRoundedExp_T}; // @[RoundAnyRawFNToRecFN.scala:185:{54,76}] wire [13:0] sRoundedExp = {sAdjustedExp[12], sAdjustedExp} + {{11{_sRoundedExp_T_1[2]}}, _sRoundedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:106:31, :185:{40,76}] assign _common_expOut_T = sRoundedExp[11:0]; // @[RoundAnyRawFNToRecFN.scala:185:40, :187:37] assign common_expOut = _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:122:31, :187:37] wire [51:0] _common_fractOut_T = roundedSig[52:1]; // @[RoundAnyRawFNToRecFN.scala:173:16, :190:27] wire [51:0] _common_fractOut_T_1 = roundedSig[51:0]; // @[RoundAnyRawFNToRecFN.scala:173:16, :191:27] assign _common_fractOut_T_2 = _common_fractOut_T_1; // @[RoundAnyRawFNToRecFN.scala:189:16, :191:27] assign common_fractOut = _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:123:31, :189:16] wire _unboundedRange_roundPosBit_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:45] wire _unboundedRange_anyRound_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:45, :205:44] wire _unboundedRange_roundPosBit_T_1 = adjustedSig[1]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:61] wire unboundedRange_roundPosBit = _unboundedRange_roundPosBit_T_1; // @[RoundAnyRawFNToRecFN.scala:203:{16,61}] wire [1:0] _unboundedRange_anyRound_T_2 = adjustedSig[1:0]; // @[RoundAnyRawFNToRecFN.scala:116:66, :205:63] wire _unboundedRange_anyRound_T_3 = |_unboundedRange_anyRound_T_2; // @[RoundAnyRawFNToRecFN.scala:205:{63,70}] wire unboundedRange_anyRound = _unboundedRange_anyRound_T_3; // @[RoundAnyRawFNToRecFN.scala:205:{49,70}] wire _unboundedRange_roundIncr_T_1 = _unboundedRange_roundIncr_T & unboundedRange_roundPosBit; // @[RoundAnyRawFNToRecFN.scala:203:16, :207:{38,67}] wire _unboundedRange_roundIncr_T_2 = roundMagUp & unboundedRange_anyRound; // @[RoundAnyRawFNToRecFN.scala:98:42, :205:49, :209:29] wire unboundedRange_roundIncr = _unboundedRange_roundIncr_T_1 | _unboundedRange_roundIncr_T_2; // @[RoundAnyRawFNToRecFN.scala:207:67, :208:46, :209:29] wire _roundCarry_T = roundedSig[54]; // @[RoundAnyRawFNToRecFN.scala:173:16, :212:27] wire _roundCarry_T_1 = roundedSig[53]; // @[RoundAnyRawFNToRecFN.scala:173:16, :213:27] wire roundCarry = _roundCarry_T_1; // @[RoundAnyRawFNToRecFN.scala:211:16, :213:27] assign common_inexact = _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:127:37, :230:49] wire _commonCase_T_3 = ~io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :237:64] wire commonCase = _commonCase_T_3; // @[RoundAnyRawFNToRecFN.scala:237:{61,64}] wire _inexact_T = commonCase & common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37, :237:61, :240:43] wire inexact = _inexact_T; // @[RoundAnyRawFNToRecFN.scala:240:{28,43}] wire overflow_roundMagUp = _overflow_roundMagUp_T | roundMagUp; // @[RoundAnyRawFNToRecFN.scala:98:42, :243:{32,60}] wire _pegMinNonzeroMagOut_T_1 = roundMagUp | roundingMode_odd; // @[RoundAnyRawFNToRecFN.scala:95:53, :98:42, :245:60] wire _pegMaxFiniteMagOut_T = ~overflow_roundMagUp; // @[RoundAnyRawFNToRecFN.scala:243:60, :246:42] wire [11:0] _expOut_T_1 = _expOut_T ? 
12'hE00 : 12'h0; // @[RoundAnyRawFNToRecFN.scala:253:{18,32}] wire [11:0] _expOut_T_2 = ~_expOut_T_1; // @[RoundAnyRawFNToRecFN.scala:253:{14,18}] wire [11:0] _expOut_T_3 = common_expOut & _expOut_T_2; // @[RoundAnyRawFNToRecFN.scala:122:31, :252:24, :253:14] wire [11:0] _expOut_T_7 = _expOut_T_3; // @[RoundAnyRawFNToRecFN.scala:252:24, :256:17] wire [11:0] _expOut_T_10 = _expOut_T_7; // @[RoundAnyRawFNToRecFN.scala:256:17, :260:17] wire [11:0] _expOut_T_13 = _expOut_T_10; // @[RoundAnyRawFNToRecFN.scala:260:17, :264:17] wire [11:0] _expOut_T_15 = _expOut_T_13; // @[RoundAnyRawFNToRecFN.scala:264:17, :268:18] wire [11:0] _expOut_T_17 = _expOut_T_15; // @[RoundAnyRawFNToRecFN.scala:268:18, :272:15] wire [11:0] _expOut_T_19 = _expOut_T_17; // @[RoundAnyRawFNToRecFN.scala:272:15, :276:15] wire [11:0] expOut = _expOut_T_19; // @[RoundAnyRawFNToRecFN.scala:276:15, :277:73] wire _fractOut_T_1 = _fractOut_T; // @[RoundAnyRawFNToRecFN.scala:280:{22,38}] wire [51:0] _fractOut_T_3 = _fractOut_T_1 ? 52'h0 : common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31, :280:{12,38}, :281:16, :284:13] wire [51:0] fractOut = _fractOut_T_3; // @[RoundAnyRawFNToRecFN.scala:280:12, :283:11] wire [12:0] _io_out_T = {signOut, expOut}; // @[RoundAnyRawFNToRecFN.scala:250:22, :277:73, :286:23] assign _io_out_T_1 = {_io_out_T, fractOut}; // @[RoundAnyRawFNToRecFN.scala:283:11, :286:{23,33}] assign io_out_0 = _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:48:5, :286:33] assign _io_exceptionFlags_T_3 = {4'h0, inexact}; // @[RoundAnyRawFNToRecFN.scala:240:28, :288:{53,66}] assign io_exceptionFlags_0 = _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:66] assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5] assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5] endmodule
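Most of this generated instance folds to constants at elaboration time: with inExpWidth = 7 and outExpWidth = 11, neverOverflows and neverUnderflows are both true regardless of the option bits, so the overflow and underflow paths reduce to false.B, and the invalidExc/infiniteExc/isNaN/isInf inputs are constant zero here, which removes the NaN and infinity paths as well. Rounding itself survives (inSigWidth = 64 is wider than outSigWidth = 53), but with neverUnderflows the round mask collapses to the constant 56'h3 seen above. A small plain-Scala sketch of the two elaboration-time conditions, for illustration:

object WhyThisInstanceFolds {
  val inExpWidth  = 7
  val outExpWidth = 11
  val neverOverflows  = inExpWidth < outExpWidth // true -> common_overflow  := false.B
  val neverUnderflows = inExpWidth < outExpWidth // true -> common_underflow := false.B
}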
Generate the Verilog code corresponding to the following Chisel files. File UnsafeAXI4ToTL.scala: package ara import chisel3._ import chisel3.util._ import freechips.rocketchip.amba._ import freechips.rocketchip.amba.axi4._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle { val data = UInt(dataWidth.W) val resp = UInt(respWidth.W) val last = Bool() val user = BundleMap(userFields) } /** Parameters for [[BaseReservableListBuffer]] and all child classes. * * @param numEntries Total number of elements that can be stored in the 'data' RAM * @param numLists Maximum number of linked lists * @param numBeats Maximum number of beats per entry */ case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) { // Avoid zero-width wires when we call 'log2Ceil' val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries) val listBits = if (numLists == 1) 1 else log2Ceil(numLists) val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats) } case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName) extends MixedAdapterNode(AXI4Imp, TLImp)( dFn = { case mp => TLMasterPortParameters.v2( masters = mp.masters.zipWithIndex.map { case (m, i) => // Support 'numTlTxns' read requests and 'numTlTxns' write requests at once. val numSourceIds = numTlTxns * 2 TLMasterParameters.v2( name = m.name, sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds), nodePath = m.nodePath ) }, echoFields = mp.echoFields, requestFields = AMBAProtField() +: mp.requestFields, responseKeys = mp.responseKeys ) }, uFn = { mp => AXI4SlavePortParameters( slaves = mp.managers.map { m => val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits)) AXI4SlaveParameters( address = m.address, resources = m.resources, regionType = m.regionType, executable = m.executable, nodePath = m.nodePath, supportsWrite = m.supportsPutPartial.intersect(maxXfer), supportsRead = m.supportsGet.intersect(maxXfer), interleavedId = Some(0) // TL2 never interleaves D beats ) }, beatBytes = mp.beatBytes, minLatency = mp.minLatency, responseFields = mp.responseFields, requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt) ) } ) class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule { require(numTlTxns >= 1) require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2") val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => edgeIn.master.masters.foreach { m => require(m.aligned, "AXI4ToTL requires aligned requests") } val numIds = edgeIn.master.endId val beatBytes = edgeOut.slave.beatBytes val maxTransfer = edgeOut.slave.maxTransfer val maxBeats = maxTransfer / beatBytes // Look for an Error device to redirect bad requests val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError") require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. 
One must be instantiated.") val errorDev = errorDevs.maxBy(_.maxTransfer) val errorDevAddr = errorDev.address.head.base require( errorDev.supportsPutPartial.contains(maxTransfer), s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer" ) require( errorDev.supportsGet.contains(maxTransfer), s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer" ) // All of the read-response reordering logic. val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields) val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats) val listBuffer = if (numTlTxns > 1) { Module(new ReservableListBuffer(listBufData, listBufParams)) } else { Module(new PassthroughListBuffer(listBufData, listBufParams)) } // To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to // 0 for read requests and 1 for write requests. val isReadSourceBit = 0.U(1.W) val isWriteSourceBit = 1.U(1.W) /* Read request logic */ val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val rBytes1 = in.ar.bits.bytes1() val rSize = OH1ToUInt(rBytes1) val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize) val rId = if (numTlTxns > 1) { Cat(isReadSourceBit, listBuffer.ioReservedIndex) } else { isReadSourceBit } val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Indicates if there are still valid TileLink source IDs left to use. val canIssueR = listBuffer.ioReserve.ready listBuffer.ioReserve.bits := in.ar.bits.id listBuffer.ioReserve.valid := in.ar.valid && rOut.ready in.ar.ready := rOut.ready && canIssueR rOut.valid := in.ar.valid && canIssueR rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2 rOut.bits.user :<= in.ar.bits.user rOut.bits.user.lift(AMBAProt).foreach { rProt => rProt.privileged := in.ar.bits.prot(0) rProt.secure := !in.ar.bits.prot(1) rProt.fetch := in.ar.bits.prot(2) rProt.bufferable := in.ar.bits.cache(0) rProt.modifiable := in.ar.bits.cache(1) rProt.readalloc := in.ar.bits.cache(2) rProt.writealloc := in.ar.bits.cache(3) } /* Write request logic */ // Strip off the MSB, which identifies the transaction as read vs write. val strippedResponseSourceId = if (numTlTxns > 1) { out.d.bits.source((out.d.bits.source).getWidth - 2, 0) } else { // When there's only 1 TileLink transaction allowed for read/write, then this field is always 0. 0.U(1.W) } // Track when a write request burst is in progress. val writeBurstBusy = RegInit(false.B) when(in.w.fire) { writeBurstBusy := !in.w.bits.last } val usedWriteIds = RegInit(0.U(numTlTxns.W)) val canIssueW = !usedWriteIds.andR val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W)) val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W)) usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet // Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't // change mid-burst. 
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W)) val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy val freeWriteIdIndex = OHToUInt(freeWriteIdOH) freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val wBytes1 = in.aw.bits.bytes1() val wSize = OH1ToUInt(wBytes1) val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize) val wId = if (numTlTxns > 1) { Cat(isWriteSourceBit, freeWriteIdIndex) } else { isWriteSourceBit } val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain // asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but // the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb // bits during a W-channel burst. in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW in.w.ready := wOut.ready && in.aw.valid && canIssueW wOut.valid := in.aw.valid && in.w.valid && canIssueW wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2 in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ } wOut.bits.user :<= in.aw.bits.user wOut.bits.user.lift(AMBAProt).foreach { wProt => wProt.privileged := in.aw.bits.prot(0) wProt.secure := !in.aw.bits.prot(1) wProt.fetch := in.aw.bits.prot(2) wProt.bufferable := in.aw.bits.cache(0) wProt.modifiable := in.aw.bits.cache(1) wProt.readalloc := in.aw.bits.cache(2) wProt.writealloc := in.aw.bits.cache(3) } // Merge the AXI4 read/write requests into the TL-A channel. TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut)) /* Read/write response logic */ val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle))) val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle))) val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY) val dHasData = edgeOut.hasData(out.d.bits) val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d) val dNumBeats1 = edgeOut.numBeats1(out.d.bits) // Handle cases where writeack arrives before write is done val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck) listBuffer.ioDataOut.ready := okR.ready okR.valid := listBuffer.ioDataOut.valid okB.valid := out.d.valid && !dHasData && !writeEarlyAck listBuffer.ioResponse.valid := out.d.valid && dHasData listBuffer.ioResponse.bits.index := strippedResponseSourceId listBuffer.ioResponse.bits.data.data := out.d.bits.data listBuffer.ioResponse.bits.data.resp := dResp listBuffer.ioResponse.bits.data.last := dLast listBuffer.ioResponse.bits.data.user :<= out.d.bits.user listBuffer.ioResponse.bits.count := dCount listBuffer.ioResponse.bits.numBeats1 := dNumBeats1 okR.bits.id := listBuffer.ioDataOut.bits.listIndex okR.bits.data := listBuffer.ioDataOut.bits.payload.data okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp okR.bits.last := listBuffer.ioDataOut.bits.payload.last okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user // Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write // response, mark the write transaction as complete. 
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W)) val writeResponseId = writeIdMap.read(strippedResponseSourceId) when(wOut.fire) { writeIdMap.write(freeWriteIdIndex, in.aw.bits.id) } when(edgeOut.done(wOut)) { usedWriteIdsSet := freeWriteIdOH } when(okB.fire) { usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns) } okB.bits.id := writeResponseId okB.bits.resp := dResp okB.bits.user :<= out.d.bits.user // AXI4 needs irrevocable behaviour in.r <> Queue.irrevocable(okR, 1, flow = true) in.b <> Queue.irrevocable(okB, 1, flow = true) // Unused channels out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B /* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */ def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = { val lReqType = reqType.toLowerCase when(a.valid) { assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U) // Narrow transfers and FIXED bursts must be single-beat bursts. when(a.bits.len =/= 0.U) { assert( a.bits.size === log2Ceil(beatBytes).U, s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)", 1.U << a.bits.size, a.bits.len + 1.U ) assert( a.bits.burst =/= AXI4Parameters.BURST_FIXED, s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)", a.bits.len + 1.U ) } // Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in // particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink // Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts. } } checkRequest(in.ar, "Read") checkRequest(in.aw, "Write") } } } object UnsafeAXI4ToTL { def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = { val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt)) axi42tl.node } } /* ReservableListBuffer logic, and associated classes. */ class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle { val index = UInt(params.entryBits.W) val count = UInt(params.beatBits.W) val numBeats1 = UInt(params.beatBits.W) } class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle { val listIndex = UInt(params.listBits.W) } /** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */ abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends Module { require(params.numEntries > 0) require(params.numLists > 0) val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W)))) val ioReservedIndex = IO(Output(UInt(params.entryBits.W))) val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params)))) val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params))) } /** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve * linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the * 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a * given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order. 
* * ==Constructor== * @param gen Chisel type of linked list data element * @param params Other parameters * * ==Module IO== * @param ioReserve Index of list to reserve a new element in * @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire' * @param ioResponse Payload containing response data and linked-list-entry index * @param ioDataOut Payload containing data read from response linked list and linked list index */ class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { val valid = RegInit(0.U(params.numLists.W)) val head = Mem(params.numLists, UInt(params.entryBits.W)) val tail = Mem(params.numLists, UInt(params.entryBits.W)) val used = RegInit(0.U(params.numEntries.W)) val next = Mem(params.numEntries, UInt(params.entryBits.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) } val dataIsPresent = RegInit(0.U(params.numEntries.W)) val beats = Mem(params.numEntries, UInt(params.beatBits.W)) // The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower. val dataMemReadEnable = WireDefault(false.B) val dataMemWriteEnable = WireDefault(false.B) assert(!(dataMemReadEnable && dataMemWriteEnable)) // 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the // lowest-index entry in the 'data' RAM which is free. val freeOH = Wire(UInt(params.numEntries.W)) val freeIndex = OHToUInt(freeOH) freeOH := ~(leftOR(~used) << 1) & ~used ioReservedIndex := freeIndex val validSet = WireDefault(0.U(params.numLists.W)) val validClr = WireDefault(0.U(params.numLists.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) val dataIsPresentSet = WireDefault(0.U(params.numEntries.W)) val dataIsPresentClr = WireDefault(0.U(params.numEntries.W)) valid := (valid & ~validClr) | validSet used := (used & ~usedClr) | usedSet dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet /* Reservation logic signals */ val reserveTail = Wire(UInt(params.entryBits.W)) val reserveIsValid = Wire(Bool()) /* Response logic signals */ val responseIndex = Wire(UInt(params.entryBits.W)) val responseListIndex = Wire(UInt(params.listBits.W)) val responseHead = Wire(UInt(params.entryBits.W)) val responseTail = Wire(UInt(params.entryBits.W)) val nextResponseHead = Wire(UInt(params.entryBits.W)) val nextDataIsPresent = Wire(Bool()) val isResponseInOrder = Wire(Bool()) val isEndOfList = Wire(Bool()) val isLastBeat = Wire(Bool()) val isLastResponseBeat = Wire(Bool()) val isLastUnwindBeat = Wire(Bool()) /* Reservation logic */ reserveTail := tail.read(ioReserve.bits) reserveIsValid := valid(ioReserve.bits) ioReserve.ready := !used.andR // When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we // actually start a new list, rather than appending to a list that's about to disappear. 
val reserveResponseSameList = ioReserve.bits === responseListIndex val appendToAndDestroyList = ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat when(ioReserve.fire) { validSet := UIntToOH(ioReserve.bits, params.numLists) usedSet := freeOH when(reserveIsValid && !appendToAndDestroyList) { next.write(reserveTail, freeIndex) }.otherwise { head.write(ioReserve.bits, freeIndex) } tail.write(ioReserve.bits, freeIndex) map.write(freeIndex, ioReserve.bits) } /* Response logic */ // The majority of the response logic (reading from and writing to the various RAMs) is common between the // response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid). // The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the // 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and // response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after // two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker. responseHead := head.read(responseListIndex) responseTail := tail.read(responseListIndex) nextResponseHead := next.read(responseIndex) nextDataIsPresent := dataIsPresent(nextResponseHead) // Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since // there isn't a next element in the linked list. isResponseInOrder := responseHead === responseIndex isEndOfList := responseHead === responseTail isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1 // When a response's last beat is sent to the output channel, mark it as completed. This can happen in two // situations: // 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM // reservation was never needed. // 2. An entry is read out of the 'data' SRAM (within the unwind FSM). when(ioDataOut.fire && isLastBeat) { // Mark the reservation as no-longer-used. usedClr := UIntToOH(responseIndex, params.numEntries) // If the response is in-order, then we're popping an element from this linked list. when(isEndOfList) { // Once we pop the last element from a linked list, mark it as no-longer-present. validClr := UIntToOH(responseListIndex, params.numLists) }.otherwise { // Move the linked list's head pointer to the new head pointer. head.write(responseListIndex, nextResponseHead) } } // If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding. when(ioResponse.fire && !isResponseInOrder) { dataMemWriteEnable := true.B when(isLastResponseBeat) { dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries) beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1) } } // Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to. 
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats) (responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) => when(select && dataMemWriteEnable) { seqMem.write(ioResponse.bits.index, ioResponse.bits.data) } } /* Response unwind logic */ // Unwind FSM state definitions val sIdle :: sUnwinding :: Nil = Enum(2) val unwindState = RegInit(sIdle) val busyUnwinding = unwindState === sUnwinding val startUnwind = Wire(Bool()) val stopUnwind = Wire(Bool()) when(startUnwind) { unwindState := sUnwinding }.elsewhen(stopUnwind) { unwindState := sIdle } assert(!(startUnwind && stopUnwind)) // Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to // become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is // invalid. // // Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to // worry about overwriting the 'data' SRAM's output when we start the unwind FSM. startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent // Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of // two things happens: // 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent) // 2. There are no more outstanding responses in this list (isEndOfList) // // Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are // passing from 'ioResponse' to 'ioDataOut'. stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList) val isUnwindBurstOver = Wire(Bool()) val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable) // Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of // beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we // increment 'beatCounter' until it reaches 'unwindBeats1'. val unwindBeats1 = Reg(UInt(params.beatBits.W)) val nextBeatCounter = Wire(UInt(params.beatBits.W)) val beatCounter = RegNext(nextBeatCounter) isUnwindBurstOver := beatCounter === unwindBeats1 when(startNewBurst) { unwindBeats1 := beats.read(nextResponseHead) nextBeatCounter := 0.U }.elsewhen(dataMemReadEnable) { nextBeatCounter := beatCounter + 1.U }.otherwise { nextBeatCounter := beatCounter } // When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next // entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which // happens at the start of reading a new stored burst). val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst) responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index) // Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the // SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead // holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'. val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex) // The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. 
The SRAM's output stays valid // until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle). val unwindDataIsValid = RegInit(false.B) when(dataMemReadEnable) { unwindDataIsValid := true.B }.elsewhen(ioDataOut.fire) { unwindDataIsValid := false.B } isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid // Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats. isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat) // Select which SRAM to read from based on the beat counter. val dataOutputVec = Wire(Vec(params.numBeats, gen)) val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats) (nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) => dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable) } // Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured // by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading // from. val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable) // Mark 'data' burst entries as no-longer-present as they get read out of the SRAM. when(dataMemReadEnable) { dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries) } // As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue // a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know // we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be // consumed by the output channel). val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem) // While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need // 'responseListIndex' to be coherent for the entire unwind process. val rawResponseListIndex = map.read(responseIndex) val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst) responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex) // Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are // just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that // could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be // single-ported. ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding // Either pass an in-order response to the output channel, or data read from the unwind FSM. ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder) ioDataOut.bits.listIndex := responseListIndex ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data) // It's an error to get a response that isn't associated with a valid linked list. when(ioResponse.fire || unwindDataIsValid) { assert( valid(responseListIndex), "No linked list exists at index %d, mapped from %d", responseListIndex, responseIndex ) } when(busyUnwinding && dataMemReadEnable) { assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order") } } /** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1. 
* * Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to * reorder any responses, or store any linked lists. */ class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1") val used = RegInit(0.U(params.numEntries.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) used := (used & ~usedClr) | usedSet ioReserve.ready := used === 0.U // Store which list index was reserved, we need to return this value when we get a response. when(ioReserve.fire) { usedSet := 1.U map.write(0.U, ioReserve.bits) } // There's only one valid linked list entry, which is at index 0. ioReservedIndex := 0.U val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1 // Mark the linked list as empty when we get the last beat in a response. // Note that 'ioResponse.fire === ioDataOut.fire'. when(ioResponse.fire && isLastResponseBeat) { usedClr := 1.U } // Always pass the response data straight through, since we never need to reorder the response data. ioDataOut.bits.listIndex := map.read(0.U) ioDataOut.bits.payload := ioResponse.bits.data ioDataOut.valid := ioResponse.valid ioResponse.ready := ioDataOut.ready }
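As an aside before the generated Verilog: the unwind datapath above leans on the 'readAndHold'-style idiom ('dataOutput ... holdUnless RegNext(dataMemReadEnable)'). A minimal sketch of that helper, assuming the usual Mux-plus-RegEnable formulation (an illustrative stand-in, not the freechips.rocketchip.util source):

import chisel3._
import chisel3.util._

// Illustrative sketch: return 'x' while 'en' is asserted, otherwise hold the
// value 'x' had the last time 'en' was asserted (the readAndHold pattern).
object HoldUnlessSketch {
  def apply[T <: Data](x: T, en: Bool): T = Mux(en, x, RegEnable(x, en))
}

Used as 'HoldUnlessSketch(mem.read(addr, en), RegNext(en))', this keeps a synchronous-read SRAM's output stable while the consumer back-pressures, which is the role it plays for the single 'data' SRAM beat selected above.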
module dataMems_254( // @[UnsafeAXI4ToTL.scala:365:62] input [4:0] R0_addr, input R0_en, input R0_clk, output [66:0] R0_data, input [4:0] W0_addr, input W0_en, input W0_clk, input [66:0] W0_data ); dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62] .R0_addr (R0_addr), .R0_en (R0_en), .R0_clk (R0_clk), .R0_data (R0_data), .W0_addr (W0_addr), .W0_en (W0_en), .W0_clk (W0_clk), .W0_data (W0_data) ); // @[UnsafeAXI4ToTL.scala:365:62] endmodule
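For orientation between examples: a 1R/1W memory wrapper of this shape (5-bit addresses, 67-bit data, per-port enable and clock) is what a Chisel SyncReadMem typically elaborates to. A minimal sketch with the depth and width read off the ports above (module and signal names here are illustrative):

import chisel3._

// Illustrative 32-entry x 67-bit memory with one synchronous read port and one
// write port, similar in shape to the dataMems_* wrappers in this design.
class DataMemSketch extends Module {
  val io = IO(new Bundle {
    val raddr = Input(UInt(5.W))
    val ren   = Input(Bool())
    val rdata = Output(UInt(67.W))
    val waddr = Input(UInt(5.W))
    val wen   = Input(Bool())
    val wdata = Input(UInt(67.W))
  })
  val mem = SyncReadMem(32, UInt(67.W))
  io.rdata := mem.read(io.raddr, io.ren)
  when(io.wen) { mem.write(io.waddr, io.wdata) }
}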
Generate the Verilog code corresponding to the following Chisel files. File EgressUnit.scala: package constellation.router import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.util._ import constellation.channel._ import constellation.routing.{FlowRoutingBundle} class EgressUnit(coupleSAVA: Boolean, combineSAST: Boolean, inParams: Seq[ChannelParams], ingressParams: Seq[IngressChannelParams], cParam: EgressChannelParams) (implicit p: Parameters) extends AbstractOutputUnit(inParams, ingressParams, cParam)(p) { class EgressUnitIO extends AbstractOutputUnitIO(inParams, ingressParams, cParam) { val out = Decoupled(new EgressFlit(cParam.payloadBits)) } val io = IO(new EgressUnitIO) val channel_empty = RegInit(true.B) val flow = Reg(new FlowRoutingBundle) val q = Module(new Queue(new EgressFlit(cParam.payloadBits), 3 - (if (combineSAST) 1 else 0), flow=true)) q.io.enq.valid := io.in(0).valid q.io.enq.bits.head := io.in(0).bits.head q.io.enq.bits.tail := io.in(0).bits.tail val flows = cParam.possibleFlows.toSeq if (flows.size == 0) { q.io.enq.bits.ingress_id := 0.U(1.W) } else { q.io.enq.bits.ingress_id := Mux1H( flows.map(f => (f.ingressNode.U === io.in(0).bits.flow.ingress_node && f.ingressNodeId.U === io.in(0).bits.flow.ingress_node_id)), flows.map(f => f.ingressId.U(ingressIdBits.W)) ) } q.io.enq.bits.payload := io.in(0).bits.payload io.out <> q.io.deq assert(!(q.io.enq.valid && !q.io.enq.ready)) io.credit_available(0) := q.io.count === 0.U io.channel_status(0).occupied := !channel_empty io.channel_status(0).flow := flow when (io.credit_alloc(0).alloc && io.credit_alloc(0).tail) { channel_empty := true.B if (coupleSAVA) io.channel_status(0).occupied := false.B } when (io.allocs(0).alloc) { channel_empty := false.B flow := io.allocs(0).flow } }
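Before the generated Verilog, one note on the ingress_id selection above: Mux1H picks the input whose select bit is set, and the selects are assumed one-hot (at most one flow matches the incoming (ingress_node, ingress_node_id) pair). A standalone sketch of the same lookup, with a made-up flow table purely for illustration:

import chisel3._
import chisel3.util._

// Illustrative one-hot flow -> ingressId lookup in the style of EgressUnit.
// The (ingressNode, ingressNodeId, ingressId) triples are example values only.
class IngressIdLookupSketch extends Module {
  val io = IO(new Bundle {
    val node   = Input(UInt(5.W))
    val nodeId = Input(UInt(2.W))
    val out    = Output(UInt(3.W))
  })
  val flows = Seq((1, 0, 2), (3, 1, 5), (4, 2, 6))
  io.out := Mux1H(
    flows.map { case (n, i, _) => io.node === n.U && io.nodeId === i.U },
    flows.map { case (_, _, id) => id.U(3.W) }
  )
}

If no select is high the Mux1H result is undefined, which is presumably why the Chisel above handles the empty possibleFlows configuration separately rather than calling Mux1H on empty sequences.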
module EgressUnit_13( // @[EgressUnit.scala:12:7] input clock, // @[EgressUnit.scala:12:7] input reset, // @[EgressUnit.scala:12:7] input io_in_0_valid, // @[EgressUnit.scala:18:14] input io_in_0_bits_head, // @[EgressUnit.scala:18:14] input io_in_0_bits_tail, // @[EgressUnit.scala:18:14] input [72:0] io_in_0_bits_payload, // @[EgressUnit.scala:18:14] input [4:0] io_in_0_bits_flow_ingress_node, // @[EgressUnit.scala:18:14] input [1:0] io_in_0_bits_flow_ingress_node_id, // @[EgressUnit.scala:18:14] output io_credit_available_0, // @[EgressUnit.scala:18:14] output io_channel_status_0_occupied, // @[EgressUnit.scala:18:14] input io_allocs_0_alloc, // @[EgressUnit.scala:18:14] input io_credit_alloc_0_alloc, // @[EgressUnit.scala:18:14] input io_credit_alloc_0_tail, // @[EgressUnit.scala:18:14] output io_out_valid, // @[EgressUnit.scala:18:14] output io_out_bits_head, // @[EgressUnit.scala:18:14] output io_out_bits_tail, // @[EgressUnit.scala:18:14] output [72:0] io_out_bits_payload // @[EgressUnit.scala:18:14] ); wire _q_io_enq_ready; // @[EgressUnit.scala:22:17] wire [1:0] _q_io_count; // @[EgressUnit.scala:22:17] reg channel_empty; // @[EgressUnit.scala:20:30] wire _q_io_enq_bits_ingress_id_T_22 = io_in_0_bits_flow_ingress_node_id == 2'h2; // @[EgressUnit.scala:32:27]
Generate the Verilog code corresponding to the following Chisel files. File util.scala: //****************************************************************************** // Copyright (c) 2015 - 2019, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Utility Functions //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.util import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util.{Str} import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.tile.{TileKey} import boom.v3.common.{MicroOp} import boom.v3.exu.{BrUpdateInfo} /** * Object to XOR fold a input register of fullLength into a compressedLength. */ object Fold { def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = { val clen = compressedLength val hlen = fullLength if (hlen <= clen) { input } else { var res = 0.U(clen.W) var remaining = input.asUInt for (i <- 0 to hlen-1 by clen) { val len = if (i + clen > hlen ) (hlen - i) else clen require(len > 0) res = res(clen-1,0) ^ remaining(len-1,0) remaining = remaining >> len.U } res } } } /** * Object to check if MicroOp was killed due to a branch mispredict. * Uses "Fast" branch masks */ object IsKilledByBranch { def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask) } def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop_mask) } } /** * Object to return new MicroOp with a new BR mask given a MicroOp mask * and old BR mask. */ object GetNewUopAndBrMask { def apply(uop: MicroOp, brupdate: BrUpdateInfo) (implicit p: Parameters): MicroOp = { val newuop = WireInit(uop) newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask newuop } } /** * Object to return a BR mask given a MicroOp mask and old BR mask. 
*/ object GetNewBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = { return uop.br_mask & ~brupdate.b1.resolve_mask } def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = { return br_mask & ~brupdate.b1.resolve_mask } } object UpdateBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = { val out = WireInit(uop) out.br_mask := GetNewBrMask(brupdate, uop) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = { val out = WireInit(bundle) out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = { val out = WireInit(bundle) out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask) out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask) out } } /** * Object to check if at least 1 bit matches in two masks */ object maskMatch { def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U } /** * Object to clear one bit in a mask given an index */ object clearMaskBit { def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0) } /** * Object to shift a register over by one bit and concat a new one */ object PerformShiftRegister { def apply(reg_val: UInt, new_bit: Bool): UInt = { reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt reg_val } } /** * Object to shift a register over by one bit, wrapping the top bit around to the bottom * (XOR'ed with a new-bit), and evicting a bit at index HLEN. * This is used to simulate a longer HLEN-width shift register that is folded * down to a compressed CLEN. */ object PerformCircularShiftRegister { def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = { val carry = csr(clen-1) val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U) newval } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapAdd { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, amt: UInt, n: Int): UInt = { if (isPow2(n)) { (value + amt)(log2Ceil(n)-1,0) } else { val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt) Mux(sum >= n.U, sum - n.U, sum) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapSub { // "n" is the number of increments, so we wrap to n-1. def apply(value: UInt, amt: Int, n: Int): UInt = { if (isPow2(n)) { (value - amt.U)(log2Ceil(n)-1,0) } else { val v = Cat(0.U(1.W), value) val b = Cat(0.U(1.W), amt.U) Mux(value >= amt.U, value - amt.U, n.U - amt.U + value) } } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapInc { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value + 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === (n-1).U) Mux(wrap, 0.U, value + 1.U) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapDec { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value - 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === 0.U) Mux(wrap, (n-1).U, value - 1.U) } } } /** * Object to mask off lower bits of a PC to align to a "b" * Byte boundary. */ object AlignPCToBoundary { def apply(pc: UInt, b: Int): UInt = { // Invert for scenario where pc longer than b // (which would clear all bits above size(b)). 
~(~pc | (b-1).U) } } /** * Object to rotate a signal left by one */ object RotateL1 { def apply(signal: UInt): UInt = { val w = signal.getWidth val out = Cat(signal(w-2,0), signal(w-1)) return out } } /** * Object to sext a value to a particular length. */ object Sext { def apply(x: UInt, length: Int): UInt = { if (x.getWidth == length) return x else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x) } } /** * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate * Asking for U-type gives it shifted up 12 bits. */ object ImmGen { import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U} def apply(ip: UInt, isel: UInt): SInt = { val sign = ip(LONGEST_IMM_SZ-1).asSInt val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign) val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign) val i11 = Mux(isel === IS_U, 0.S, Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign)) val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt) val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt) val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S) return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt } } /** * Object to get the FP rounding mode out of a packed immediate. */ object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } } /** * Object to get the FP function fype from a packed immediate. * Note: only works if !(IS_B or IS_S) */ object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } } /** * Object to see if an instruction is a JALR. */ object DebugIsJALR { def apply(inst: UInt): Bool = { // TODO Chisel not sure why this won't compile // val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)), // Array( // JALR -> Bool(true))) inst(6,0) === "b1100111".U } } /** * Object to take an instruction and output its branch or jal target. Only used * for a debug assert (no where else would we jump straight from instruction * bits to a target). */ object DebugGetBJImm { def apply(inst: UInt): UInt = { // TODO Chisel not sure why this won't compile //val csignals = //rocket.DecodeLogic(inst, // List(Bool(false), Bool(false)), // Array( // BEQ -> List(Bool(true ), Bool(false)), // BNE -> List(Bool(true ), Bool(false)), // BGE -> List(Bool(true ), Bool(false)), // BGEU -> List(Bool(true ), Bool(false)), // BLT -> List(Bool(true ), Bool(false)), // BLTU -> List(Bool(true ), Bool(false)) // )) //val is_br :: nothing :: Nil = csignals val is_br = (inst(6,0) === "b1100011".U) val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) Mux(is_br, br_targ, jal_targ) } } /** * Object to return the lowest bit position after the head. */ object AgePriorityEncoder { def apply(in: Seq[Bool], head: UInt): UInt = { val n = in.size val width = log2Ceil(in.size) val n_padded = 1 << width val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in val idx = PriorityEncoder(temp_vec) idx(width-1, 0) //discard msb } } /** * Object to determine whether queue * index i0 is older than index i1. */ object IsOlder { def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head)) } /** * Set all bits at or below the highest order '1'. */ object MaskLower { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => in >> i.U).reduce(_|_) } } /** * Set all bits at or above the lowest order '1'. 
*/ object MaskUpper { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_) } } /** * Transpose a matrix of Chisel Vecs. */ object Transpose { def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = { val n = in(0).size VecInit((0 until n).map(i => VecInit(in.map(row => row(i))))) } } /** * N-wide one-hot priority encoder. */ object SelectFirstN { def apply(in: UInt, n: Int) = { val sels = Wire(Vec(n, UInt(in.getWidth.W))) var mask = in for (i <- 0 until n) { sels(i) := PriorityEncoderOH(mask) mask = mask & ~sels(i) } sels } } /** * Connect the first k of n valid input interfaces to k output interfaces. */ class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module { require(n >= k) val io = IO(new Bundle { val in = Vec(n, Flipped(DecoupledIO(gen))) val out = Vec(k, DecoupledIO(gen)) }) if (n == k) { io.out <> io.in } else { val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c)) val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col => (col zip io.in.map(_.valid)) map {case (c,v) => c && v}) val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_)) val out_valids = sels map (col => col.reduce(_||_)) val out_data = sels map (s => Mux1H(s, io.in.map(_.bits))) in_readys zip io.in foreach {case (r,i) => i.ready := r} out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d} } } /** * Create a queue that can be killed with a branch kill signal. * Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq). */ class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true) (implicit p: org.chipsalliance.cde.config.Parameters) extends boom.v3.common.BoomModule()(p) with boom.v3.common.HasBoomCoreParameters { val io = IO(new Bundle { val enq = Flipped(Decoupled(gen)) val deq = Decoupled(gen) val brupdate = Input(new BrUpdateInfo()) val flush = Input(Bool()) val empty = Output(Bool()) val count = Output(UInt(log2Ceil(entries).W)) }) val ram = Mem(entries, gen) val valids = RegInit(VecInit(Seq.fill(entries) {false.B})) val uops = Reg(Vec(entries, new MicroOp)) val enq_ptr = Counter(entries) val deq_ptr = Counter(entries) val maybe_full = RegInit(false.B) val ptr_match = enq_ptr.value === deq_ptr.value io.empty := ptr_match && !maybe_full val full = ptr_match && maybe_full val do_enq = WireInit(io.enq.fire) val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty) for (i <- 0 until entries) { val mask = uops(i).br_mask val uop = uops(i) valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop)) when (valids(i)) { uops(i).br_mask := GetNewBrMask(io.brupdate, mask) } } when (do_enq) { ram(enq_ptr.value) := io.enq.bits valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop) uops(enq_ptr.value) := io.enq.bits.uop uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) enq_ptr.inc() } when (do_deq) { valids(deq_ptr.value) := false.B deq_ptr.inc() } when (do_enq =/= do_deq) { maybe_full := do_enq } io.enq.ready := !full val out = Wire(gen) out := ram(deq_ptr.value) out.uop := uops(deq_ptr.value) io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop)) io.deq.bits := out io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, 
out.uop) // For flow queue behavior. if (flow) { when (io.empty) { io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop) io.deq.bits := io.enq.bits io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) do_deq := false.B when (io.deq.ready) { do_enq := false.B } } } private val ptr_diff = enq_ptr.value - deq_ptr.value if (isPow2(entries)) { io.count := Cat(maybe_full && ptr_match, ptr_diff) } else { io.count := Mux(ptr_match, Mux(maybe_full, entries.asUInt, 0.U), Mux(deq_ptr.value > enq_ptr.value, entries.asUInt + ptr_diff, ptr_diff)) } } // ------------------------------------------ // Printf helper functions // ------------------------------------------ object BoolToChar { /** * Take in a Chisel Bool and convert it into a Str * based on the Chars given * * @param c_bool Chisel Bool * @param trueChar Scala Char if bool is true * @param falseChar Scala Char if bool is false * @return UInt ASCII Char for "trueChar" or "falseChar" */ def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = { Mux(c_bool, Str(trueChar), Str(falseChar)) } } object CfiTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param cfi_type specific cfi type * @return Vec of Strs (must be indexed to get specific char) */ def apply(cfi_type: UInt) = { val strings = Seq("----", "BR ", "JAL ", "JALR") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(cfi_type) } } object BpdTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param bpd_type specific bpd type * @return Vec of Strs (must be indexed to get specific char) */ def apply(bpd_type: UInt) = { val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(bpd_type) } } object RobTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param rob_type specific rob type * @return Vec of Strs (must be indexed to get specific char) */ def apply(rob_type: UInt) = { val strings = Seq("RST", "NML", "RBK", " WT") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(rob_type) } } object XRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param xreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(xreg: UInt) = { val strings = Seq(" x0", " ra", " sp", " gp", " tp", " t0", " t1", " t2", " s0", " s1", " a0", " a1", " a2", " a3", " a4", " a5", " a6", " a7", " s2", " s3", " s4", " s5", " s6", " s7", " s8", " s9", "s10", "s11", " t3", " t4", " t5", " t6") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(xreg) } } object FPRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param fpreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(fpreg: UInt) = { val strings = Seq(" ft0", " ft1", " ft2", " ft3", " ft4", " ft5", " ft6", " ft7", " fs0", " fs1", " fa0", " fa1", " fa2", " fa3", " fa4", " fa5", " fa6", " fa7", " fs2", " fs3", " fs4", " fs5", " fs6", " fs7", " fs8", " fs9", "fs10", "fs11", " ft8", " ft9", "ft10", "ft11") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(fpreg) } } object BoomCoreStringPrefix { /** * Add prefix to BOOM strings (currently only adds the hartId) 
* * @param strs list of strings * @return String combining the list with the prefix per line */ def apply(strs: String*)(implicit p: Parameters) = { val prefix = "[C" + s"${p(TileKey).tileId}" + "] " strs.map(str => prefix + str + "\n").mkString("") } } File consts.scala: //****************************************************************************** // Copyright (c) 2011 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // RISCV Processor Constants //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.common.constants import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util.Str import freechips.rocketchip.rocket.RVCExpander /** * Mixin for issue queue types */ trait IQType { val IQT_SZ = 3 val IQT_INT = 1.U(IQT_SZ.W) val IQT_MEM = 2.U(IQT_SZ.W) val IQT_FP = 4.U(IQT_SZ.W) val IQT_MFP = 6.U(IQT_SZ.W) } /** * Mixin for scalar operation constants */ trait ScalarOpConstants { val X = BitPat("b?") val Y = BitPat("b1") val N = BitPat("b0") //************************************ // Extra Constants // Which branch predictor predicted us val BSRC_SZ = 2 val BSRC_1 = 0.U(BSRC_SZ.W) // 1-cycle branch pred val BSRC_2 = 1.U(BSRC_SZ.W) // 2-cycle branch pred val BSRC_3 = 2.U(BSRC_SZ.W) // 3-cycle branch pred val BSRC_C = 3.U(BSRC_SZ.W) // core branch resolution //************************************ // Control Signals // CFI types val CFI_SZ = 3 val CFI_X = 0.U(CFI_SZ.W) // Not a CFI instruction val CFI_BR = 1.U(CFI_SZ.W) // Branch val CFI_JAL = 2.U(CFI_SZ.W) // JAL val CFI_JALR = 3.U(CFI_SZ.W) // JALR // PC Select Signal val PC_PLUS4 = 0.U(2.W) // PC + 4 val PC_BRJMP = 1.U(2.W) // brjmp_target val PC_JALR = 2.U(2.W) // jump_reg_target // Branch Type val BR_N = 0.U(4.W) // Next val BR_NE = 1.U(4.W) // Branch on NotEqual val BR_EQ = 2.U(4.W) // Branch on Equal val BR_GE = 3.U(4.W) // Branch on Greater/Equal val BR_GEU = 4.U(4.W) // Branch on Greater/Equal Unsigned val BR_LT = 5.U(4.W) // Branch on Less Than val BR_LTU = 6.U(4.W) // Branch on Less Than Unsigned val BR_J = 7.U(4.W) // Jump val BR_JR = 8.U(4.W) // Jump Register // RS1 Operand Select Signal val OP1_RS1 = 0.U(2.W) // Register Source #1 val OP1_ZERO= 1.U(2.W) val OP1_PC = 2.U(2.W) val OP1_X = BitPat("b??") // RS2 Operand Select Signal val OP2_RS2 = 0.U(3.W) // Register Source #2 val OP2_IMM = 1.U(3.W) // immediate val OP2_ZERO= 2.U(3.W) // constant 0 val OP2_NEXT= 3.U(3.W) // constant 2/4 (for PC+2/4) val OP2_IMMC= 4.U(3.W) // for CSR imm found in RS1 val OP2_X = BitPat("b???") // Register File Write Enable Signal val REN_0 = false.B val REN_1 = true.B // Is 32b Word or 64b Doubldword? 
val SZ_DW = 1 val DW_X = true.B // Bool(xLen==64) val DW_32 = false.B val DW_64 = true.B val DW_XPR = true.B // Bool(xLen==64) // Memory Enable Signal val MEN_0 = false.B val MEN_1 = true.B val MEN_X = false.B // Immediate Extend Select val IS_I = 0.U(3.W) // I-Type (LD,ALU) val IS_S = 1.U(3.W) // S-Type (ST) val IS_B = 2.U(3.W) // SB-Type (BR) val IS_U = 3.U(3.W) // U-Type (LUI/AUIPC) val IS_J = 4.U(3.W) // UJ-Type (J/JAL) val IS_X = BitPat("b???") // Decode Stage Control Signals val RT_FIX = 0.U(2.W) val RT_FLT = 1.U(2.W) val RT_PAS = 3.U(2.W) // pass-through (prs1 := lrs1, etc) val RT_X = 2.U(2.W) // not-a-register (but shouldn't get a busy-bit, etc.) // TODO rename RT_NAR // Micro-op opcodes // TODO change micro-op opcodes into using enum val UOPC_SZ = 7 val uopX = BitPat.dontCare(UOPC_SZ) val uopNOP = 0.U(UOPC_SZ.W) val uopLD = 1.U(UOPC_SZ.W) val uopSTA = 2.U(UOPC_SZ.W) // store address generation val uopSTD = 3.U(UOPC_SZ.W) // store data generation val uopLUI = 4.U(UOPC_SZ.W) val uopADDI = 5.U(UOPC_SZ.W) val uopANDI = 6.U(UOPC_SZ.W) val uopORI = 7.U(UOPC_SZ.W) val uopXORI = 8.U(UOPC_SZ.W) val uopSLTI = 9.U(UOPC_SZ.W) val uopSLTIU= 10.U(UOPC_SZ.W) val uopSLLI = 11.U(UOPC_SZ.W) val uopSRAI = 12.U(UOPC_SZ.W) val uopSRLI = 13.U(UOPC_SZ.W) val uopSLL = 14.U(UOPC_SZ.W) val uopADD = 15.U(UOPC_SZ.W) val uopSUB = 16.U(UOPC_SZ.W) val uopSLT = 17.U(UOPC_SZ.W) val uopSLTU = 18.U(UOPC_SZ.W) val uopAND = 19.U(UOPC_SZ.W) val uopOR = 20.U(UOPC_SZ.W) val uopXOR = 21.U(UOPC_SZ.W) val uopSRA = 22.U(UOPC_SZ.W) val uopSRL = 23.U(UOPC_SZ.W) val uopBEQ = 24.U(UOPC_SZ.W) val uopBNE = 25.U(UOPC_SZ.W) val uopBGE = 26.U(UOPC_SZ.W) val uopBGEU = 27.U(UOPC_SZ.W) val uopBLT = 28.U(UOPC_SZ.W) val uopBLTU = 29.U(UOPC_SZ.W) val uopCSRRW= 30.U(UOPC_SZ.W) val uopCSRRS= 31.U(UOPC_SZ.W) val uopCSRRC= 32.U(UOPC_SZ.W) val uopCSRRWI=33.U(UOPC_SZ.W) val uopCSRRSI=34.U(UOPC_SZ.W) val uopCSRRCI=35.U(UOPC_SZ.W) val uopJ = 36.U(UOPC_SZ.W) val uopJAL = 37.U(UOPC_SZ.W) val uopJALR = 38.U(UOPC_SZ.W) val uopAUIPC= 39.U(UOPC_SZ.W) //val uopSRET = 40.U(UOPC_SZ.W) val uopCFLSH= 41.U(UOPC_SZ.W) val uopFENCE= 42.U(UOPC_SZ.W) val uopADDIW= 43.U(UOPC_SZ.W) val uopADDW = 44.U(UOPC_SZ.W) val uopSUBW = 45.U(UOPC_SZ.W) val uopSLLIW= 46.U(UOPC_SZ.W) val uopSLLW = 47.U(UOPC_SZ.W) val uopSRAIW= 48.U(UOPC_SZ.W) val uopSRAW = 49.U(UOPC_SZ.W) val uopSRLIW= 50.U(UOPC_SZ.W) val uopSRLW = 51.U(UOPC_SZ.W) val uopMUL = 52.U(UOPC_SZ.W) val uopMULH = 53.U(UOPC_SZ.W) val uopMULHU= 54.U(UOPC_SZ.W) val uopMULHSU=55.U(UOPC_SZ.W) val uopMULW = 56.U(UOPC_SZ.W) val uopDIV = 57.U(UOPC_SZ.W) val uopDIVU = 58.U(UOPC_SZ.W) val uopREM = 59.U(UOPC_SZ.W) val uopREMU = 60.U(UOPC_SZ.W) val uopDIVW = 61.U(UOPC_SZ.W) val uopDIVUW= 62.U(UOPC_SZ.W) val uopREMW = 63.U(UOPC_SZ.W) val uopREMUW= 64.U(UOPC_SZ.W) val uopFENCEI = 65.U(UOPC_SZ.W) // = 66.U(UOPC_SZ.W) val uopAMO_AG = 67.U(UOPC_SZ.W) // AMO-address gen (use normal STD for datagen) val uopFMV_W_X = 68.U(UOPC_SZ.W) val uopFMV_D_X = 69.U(UOPC_SZ.W) val uopFMV_X_W = 70.U(UOPC_SZ.W) val uopFMV_X_D = 71.U(UOPC_SZ.W) val uopFSGNJ_S = 72.U(UOPC_SZ.W) val uopFSGNJ_D = 73.U(UOPC_SZ.W) val uopFCVT_S_D = 74.U(UOPC_SZ.W) val uopFCVT_D_S = 75.U(UOPC_SZ.W) val uopFCVT_S_X = 76.U(UOPC_SZ.W) val uopFCVT_D_X = 77.U(UOPC_SZ.W) val uopFCVT_X_S = 78.U(UOPC_SZ.W) val uopFCVT_X_D = 79.U(UOPC_SZ.W) val uopCMPR_S = 80.U(UOPC_SZ.W) val uopCMPR_D = 81.U(UOPC_SZ.W) val uopFCLASS_S = 82.U(UOPC_SZ.W) val uopFCLASS_D = 83.U(UOPC_SZ.W) val uopFMINMAX_S = 84.U(UOPC_SZ.W) val uopFMINMAX_D = 85.U(UOPC_SZ.W) // = 86.U(UOPC_SZ.W) val uopFADD_S = 
87.U(UOPC_SZ.W) val uopFSUB_S = 88.U(UOPC_SZ.W) val uopFMUL_S = 89.U(UOPC_SZ.W) val uopFADD_D = 90.U(UOPC_SZ.W) val uopFSUB_D = 91.U(UOPC_SZ.W) val uopFMUL_D = 92.U(UOPC_SZ.W) val uopFMADD_S = 93.U(UOPC_SZ.W) val uopFMSUB_S = 94.U(UOPC_SZ.W) val uopFNMADD_S = 95.U(UOPC_SZ.W) val uopFNMSUB_S = 96.U(UOPC_SZ.W) val uopFMADD_D = 97.U(UOPC_SZ.W) val uopFMSUB_D = 98.U(UOPC_SZ.W) val uopFNMADD_D = 99.U(UOPC_SZ.W) val uopFNMSUB_D = 100.U(UOPC_SZ.W) val uopFDIV_S = 101.U(UOPC_SZ.W) val uopFDIV_D = 102.U(UOPC_SZ.W) val uopFSQRT_S = 103.U(UOPC_SZ.W) val uopFSQRT_D = 104.U(UOPC_SZ.W) val uopWFI = 105.U(UOPC_SZ.W) // pass uop down the CSR pipeline val uopERET = 106.U(UOPC_SZ.W) // pass uop down the CSR pipeline, also is ERET val uopSFENCE = 107.U(UOPC_SZ.W) val uopROCC = 108.U(UOPC_SZ.W) val uopMOV = 109.U(UOPC_SZ.W) // conditional mov decoded from "add rd, x0, rs2" // The Bubble Instruction (Machine generated NOP) // Insert (XOR x0,x0,x0) which is different from software compiler // generated NOPs which are (ADDI x0, x0, 0). // Reasoning for this is to let visualizers and stat-trackers differentiate // between software NOPs and machine-generated Bubbles in the pipeline. val BUBBLE = (0x4033).U(32.W) def NullMicroOp()(implicit p: Parameters): boom.v3.common.MicroOp = { val uop = Wire(new boom.v3.common.MicroOp) uop := DontCare // Overridden in the following lines uop.uopc := uopNOP // maybe not required, but helps on asserts that try to catch spurious behavior uop.bypassable := false.B uop.fp_val := false.B uop.uses_stq := false.B uop.uses_ldq := false.B uop.pdst := 0.U uop.dst_rtype := RT_X val cs = Wire(new boom.v3.common.CtrlSignals()) cs := DontCare // Overridden in the following lines cs.br_type := BR_N cs.csr_cmd := freechips.rocketchip.rocket.CSR.N cs.is_load := false.B cs.is_sta := false.B cs.is_std := false.B uop.ctrl := cs uop } } /** * Mixin for RISCV constants */ trait RISCVConstants { // abstract out instruction decode magic numbers val RD_MSB = 11 val RD_LSB = 7 val RS1_MSB = 19 val RS1_LSB = 15 val RS2_MSB = 24 val RS2_LSB = 20 val RS3_MSB = 31 val RS3_LSB = 27 val CSR_ADDR_MSB = 31 val CSR_ADDR_LSB = 20 val CSR_ADDR_SZ = 12 // location of the fifth bit in the shamt (for checking for illegal ops for SRAIW,etc.) val SHAMT_5_BIT = 25 val LONGEST_IMM_SZ = 20 val X0 = 0.U val RA = 1.U // return address register // memory consistency model // The C/C++ atomics MCM requires that two loads to the same address maintain program order. // The Cortex A9 does NOT enforce load/load ordering (which leads to buggy behavior). 
val MCM_ORDER_DEPENDENT_LOADS = true val jal_opc = (0x6f).U val jalr_opc = (0x67).U def GetUop(inst: UInt): UInt = inst(6,0) def GetRd (inst: UInt): UInt = inst(RD_MSB,RD_LSB) def GetRs1(inst: UInt): UInt = inst(RS1_MSB,RS1_LSB) def ExpandRVC(inst: UInt)(implicit p: Parameters): UInt = { val rvc_exp = Module(new RVCExpander) rvc_exp.io.in := inst Mux(rvc_exp.io.rvc, rvc_exp.io.out.bits, inst) } // Note: Accepts only EXPANDED rvc instructions def ComputeBranchTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = { val b_imm32 = Cat(Fill(20,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) ((pc.asSInt + b_imm32.asSInt).asSInt & (-2).S).asUInt } // Note: Accepts only EXPANDED rvc instructions def ComputeJALTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = { val j_imm32 = Cat(Fill(12,inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) ((pc.asSInt + j_imm32.asSInt).asSInt & (-2).S).asUInt } // Note: Accepts only EXPANDED rvc instructions def GetCfiType(inst: UInt)(implicit p: Parameters): UInt = { val bdecode = Module(new boom.v3.exu.BranchDecode) bdecode.io.inst := inst bdecode.io.pc := 0.U bdecode.io.out.cfi_type } } /** * Mixin for exception cause constants */ trait ExcCauseConstants { // a memory disambigious misspeculation occurred val MINI_EXCEPTION_MEM_ORDERING = 16.U val MINI_EXCEPTION_CSR_REPLAY = 17.U require (!freechips.rocketchip.rocket.Causes.all.contains(16)) require (!freechips.rocketchip.rocket.Causes.all.contains(17)) } File issue-slot.scala: //****************************************************************************** // Copyright (c) 2015 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // RISCV Processor Issue Slot Logic //-------------------------------------------------------------------------- //------------------------------------------------------------------------------ // // Note: stores (and AMOs) are "broken down" into 2 uops, but stored within a single issue-slot. // TODO XXX make a separate issueSlot for MemoryIssueSlots, and only they break apart stores. // TODO Disable ldspec for FP queue. package boom.v3.exu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import boom.v3.common._ import boom.v3.util._ import FUConstants._ /** * IO bundle to interact with Issue slot * * @param numWakeupPorts number of wakeup ports for the slot */ class IssueSlotIO(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomBundle { val valid = Output(Bool()) val will_be_valid = Output(Bool()) // TODO code review, do we need this signal so explicitely? val request = Output(Bool()) val request_hp = Output(Bool()) val grant = Input(Bool()) val brupdate = Input(new BrUpdateInfo()) val kill = Input(Bool()) // pipeline flush val clear = Input(Bool()) // entry being moved elsewhere (not mutually exclusive with grant) val ldspec_miss = Input(Bool()) // Previous cycle's speculative load wakeup was mispredicted. 
val wakeup_ports = Flipped(Vec(numWakeupPorts, Valid(new IqWakeup(maxPregSz)))) val pred_wakeup_port = Flipped(Valid(UInt(log2Ceil(ftqSz).W))) val spec_ld_wakeup = Flipped(Vec(memWidth, Valid(UInt(width=maxPregSz.W)))) val in_uop = Flipped(Valid(new MicroOp())) // if valid, this WILL overwrite an entry! val out_uop = Output(new MicroOp()) // the updated slot uop; will be shifted upwards in a collasping queue. val uop = Output(new MicroOp()) // the current Slot's uop. Sent down the pipeline when issued. val debug = { val result = new Bundle { val p1 = Bool() val p2 = Bool() val p3 = Bool() val ppred = Bool() val state = UInt(width=2.W) } Output(result) } } /** * Single issue slot. Holds a uop within the issue queue * * @param numWakeupPorts number of wakeup ports */ class IssueSlot(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomModule with IssueUnitConstants { val io = IO(new IssueSlotIO(numWakeupPorts)) // slot invalid? // slot is valid, holding 1 uop // slot is valid, holds 2 uops (like a store) def is_invalid = state === s_invalid def is_valid = state =/= s_invalid val next_state = Wire(UInt()) // the next state of this slot (which might then get moved to a new slot) val next_uopc = Wire(UInt()) // the next uopc of this slot (which might then get moved to a new slot) val next_lrs1_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot) val next_lrs2_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot) val state = RegInit(s_invalid) val p1 = RegInit(false.B) val p2 = RegInit(false.B) val p3 = RegInit(false.B) val ppred = RegInit(false.B) // Poison if woken up by speculative load. // Poison lasts 1 cycle (as ldMiss will come on the next cycle). // SO if poisoned is true, set it to false! val p1_poisoned = RegInit(false.B) val p2_poisoned = RegInit(false.B) p1_poisoned := false.B p2_poisoned := false.B val next_p1_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p1_poisoned, p1_poisoned) val next_p2_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p2_poisoned, p2_poisoned) val slot_uop = RegInit(NullMicroOp) val next_uop = Mux(io.in_uop.valid, io.in_uop.bits, slot_uop) //----------------------------------------------------------------------------- // next slot state computation // compute the next state for THIS entry slot (in a collasping queue, the // current uop may get moved elsewhere, and a new uop can enter when (io.kill) { state := s_invalid } .elsewhen (io.in_uop.valid) { state := io.in_uop.bits.iw_state } .elsewhen (io.clear) { state := s_invalid } .otherwise { state := next_state } //----------------------------------------------------------------------------- // "update" state // compute the next state for the micro-op in this slot. This micro-op may // be moved elsewhere, so the "next_state" travels with it. // defaults next_state := state next_uopc := slot_uop.uopc next_lrs1_rtype := slot_uop.lrs1_rtype next_lrs2_rtype := slot_uop.lrs2_rtype when (io.kill) { next_state := s_invalid } .elsewhen ((io.grant && (state === s_valid_1)) || (io.grant && (state === s_valid_2) && p1 && p2 && ppred)) { // try to issue this uop. 
when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) { next_state := s_invalid } } .elsewhen (io.grant && (state === s_valid_2)) { when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) { next_state := s_valid_1 when (p1) { slot_uop.uopc := uopSTD next_uopc := uopSTD slot_uop.lrs1_rtype := RT_X next_lrs1_rtype := RT_X } .otherwise { slot_uop.lrs2_rtype := RT_X next_lrs2_rtype := RT_X } } } when (io.in_uop.valid) { slot_uop := io.in_uop.bits assert (is_invalid || io.clear || io.kill, "trying to overwrite a valid issue slot.") } // Wakeup Compare Logic // these signals are the "next_p*" for the current slot's micro-op. // they are important for shifting the current slot_uop up to an other entry. val next_p1 = WireInit(p1) val next_p2 = WireInit(p2) val next_p3 = WireInit(p3) val next_ppred = WireInit(ppred) when (io.in_uop.valid) { p1 := !(io.in_uop.bits.prs1_busy) p2 := !(io.in_uop.bits.prs2_busy) p3 := !(io.in_uop.bits.prs3_busy) ppred := !(io.in_uop.bits.ppred_busy) } when (io.ldspec_miss && next_p1_poisoned) { assert(next_uop.prs1 =/= 0.U, "Poison bit can't be set for prs1=x0!") p1 := false.B } when (io.ldspec_miss && next_p2_poisoned) { assert(next_uop.prs2 =/= 0.U, "Poison bit can't be set for prs2=x0!") p2 := false.B } for (i <- 0 until numWakeupPorts) { when (io.wakeup_ports(i).valid && (io.wakeup_ports(i).bits.pdst === next_uop.prs1)) { p1 := true.B } when (io.wakeup_ports(i).valid && (io.wakeup_ports(i).bits.pdst === next_uop.prs2)) { p2 := true.B } when (io.wakeup_ports(i).valid && (io.wakeup_ports(i).bits.pdst === next_uop.prs3)) { p3 := true.B } } when (io.pred_wakeup_port.valid && io.pred_wakeup_port.bits === next_uop.ppred) { ppred := true.B } for (w <- 0 until memWidth) { assert (!(io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === 0.U), "Loads to x0 should never speculatively wakeup other instructions") } // TODO disable if FP IQ. for (w <- 0 until memWidth) { when (io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === next_uop.prs1 && next_uop.lrs1_rtype === RT_FIX) { p1 := true.B p1_poisoned := true.B assert (!next_p1_poisoned) } when (io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === next_uop.prs2 && next_uop.lrs2_rtype === RT_FIX) { p2 := true.B p2_poisoned := true.B assert (!next_p2_poisoned) } } // Handle branch misspeculations val next_br_mask = GetNewBrMask(io.brupdate, slot_uop) // was this micro-op killed by a branch? if yes, we can't let it be valid if // we compact it into an other entry when (IsKilledByBranch(io.brupdate, slot_uop)) { next_state := s_invalid } when (!io.in_uop.valid) { slot_uop.br_mask := next_br_mask } //------------------------------------------------------------- // Request Logic io.request := is_valid && p1 && p2 && p3 && ppred && !io.kill val high_priority = slot_uop.is_br || slot_uop.is_jal || slot_uop.is_jalr io.request_hp := io.request && high_priority when (state === s_valid_1) { io.request := p1 && p2 && p3 && ppred && !io.kill } .elsewhen (state === s_valid_2) { io.request := (p1 || p2) && ppred && !io.kill } .otherwise { io.request := false.B } //assign outputs io.valid := is_valid io.uop := slot_uop io.uop.iw_p1_poisoned := p1_poisoned io.uop.iw_p2_poisoned := p2_poisoned // micro-op will vacate due to grant. 
val may_vacate = io.grant && ((state === s_valid_1) || (state === s_valid_2) && p1 && p2 && ppred) val squash_grant = io.ldspec_miss && (p1_poisoned || p2_poisoned) io.will_be_valid := is_valid && !(may_vacate && !squash_grant) io.out_uop := slot_uop io.out_uop.iw_state := next_state io.out_uop.uopc := next_uopc io.out_uop.lrs1_rtype := next_lrs1_rtype io.out_uop.lrs2_rtype := next_lrs2_rtype io.out_uop.br_mask := next_br_mask io.out_uop.prs1_busy := !p1 io.out_uop.prs2_busy := !p2 io.out_uop.prs3_busy := !p3 io.out_uop.ppred_busy := !ppred io.out_uop.iw_p1_poisoned := p1_poisoned io.out_uop.iw_p2_poisoned := p2_poisoned when (state === s_valid_2) { when (p1 && p2 && ppred) { ; // send out the entire instruction as one uop } .elsewhen (p1 && ppred) { io.uop.uopc := slot_uop.uopc io.uop.lrs2_rtype := RT_X } .elsewhen (p2 && ppred) { io.uop.uopc := uopSTD io.uop.lrs1_rtype := RT_X } } // debug outputs io.debug.p1 := p1 io.debug.p2 := p2 io.debug.p3 := p3 io.debug.ppred := ppred io.debug.state := state }
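One more aside before the generated Verilog: the slot's branch handling reduces to the two mask operations from util.scala above. A uop is killed when its br_mask overlaps b1.mispredict_mask (maskMatch), and a surviving uop has the resolved bits cleared from its mask (GetNewBrMask). A minimal standalone sketch of that pair, with the bundles flattened to bare UInts for illustration:

import chisel3._

// Illustrative branch-mask bookkeeping in the style of IsKilledByBranch /
// GetNewBrMask: kill on overlap with the mispredict mask, otherwise clear the
// bits of branches that resolved this cycle.
class BrMaskSketch extends Module {
  val io = IO(new Bundle {
    val brMask         = Input(UInt(8.W))
    val mispredictMask = Input(UInt(8.W))
    val resolveMask    = Input(UInt(8.W))
    val killed         = Output(Bool())
    val nextBrMask     = Output(UInt(8.W))
  })
  io.killed     := (io.brMask & io.mispredictMask) =/= 0.U
  io.nextBrMask := io.brMask & ~io.resolveMask
}

This mirrors what the slot does with next_br_mask and the IsKilledByBranch(io.brupdate, slot_uop) check when computing next_state.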
module IssueSlot_38( // @[issue-slot.scala:69:7] input clock, // @[issue-slot.scala:69:7] input reset, // @[issue-slot.scala:69:7] output io_valid, // @[issue-slot.scala:73:14] output io_will_be_valid, // @[issue-slot.scala:73:14] output io_request, // @[issue-slot.scala:73:14] output io_request_hp, // @[issue-slot.scala:73:14] input io_grant, // @[issue-slot.scala:73:14] input [7:0] io_brupdate_b1_resolve_mask, // @[issue-slot.scala:73:14] input [7:0] io_brupdate_b1_mispredict_mask, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_uopc, // @[issue-slot.scala:73:14] input [31:0] io_brupdate_b2_uop_inst, // @[issue-slot.scala:73:14] input [31:0] io_brupdate_b2_uop_debug_inst, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_rvc, // @[issue-slot.scala:73:14] input [39:0] io_brupdate_b2_uop_debug_pc, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_iq_type, // @[issue-slot.scala:73:14] input [9:0] io_brupdate_b2_uop_fu_code, // @[issue-slot.scala:73:14] input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ctrl_is_load, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ctrl_is_sta, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ctrl_is_std, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_iw_state, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_iw_p1_poisoned, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_iw_p2_poisoned, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_br, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_jalr, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_jal, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_sfb, // @[issue-slot.scala:73:14] input [7:0] io_brupdate_b2_uop_br_mask, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_br_tag, // @[issue-slot.scala:73:14] input [3:0] io_brupdate_b2_uop_ftq_idx, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_edge_inst, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_pc_lob, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_taken, // @[issue-slot.scala:73:14] input [19:0] io_brupdate_b2_uop_imm_packed, // @[issue-slot.scala:73:14] input [11:0] io_brupdate_b2_uop_csr_addr, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_rob_idx, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_ldq_idx, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_stq_idx, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_rxq_idx, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_pdst, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_prs1, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_prs2, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_prs3, // @[issue-slot.scala:73:14] input [3:0] io_brupdate_b2_uop_ppred, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_prs1_busy, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_prs2_busy, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_prs3_busy, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ppred_busy, // 
@[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_stale_pdst, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_exception, // @[issue-slot.scala:73:14] input [63:0] io_brupdate_b2_uop_exc_cause, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_bypassable, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_mem_cmd, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_mem_size, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_mem_signed, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_fence, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_fencei, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_amo, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_uses_ldq, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_uses_stq, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_unique, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_flush_on_commit, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ldst_is_rs1, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_ldst, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_lrs1, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_lrs2, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_lrs3, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ldst_val, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_dst_rtype, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_frs3_en, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_fp_val, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_fp_single, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_xcpt_pf_if, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_xcpt_ae_if, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_xcpt_ma_if, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_bp_debug_if, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_bp_xcpt_if, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[issue-slot.scala:73:14] input io_brupdate_b2_valid, // @[issue-slot.scala:73:14] input io_brupdate_b2_mispredict, // @[issue-slot.scala:73:14] input io_brupdate_b2_taken, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_cfi_type, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_pc_sel, // @[issue-slot.scala:73:14] input [39:0] io_brupdate_b2_jalr_target, // @[issue-slot.scala:73:14] input [20:0] io_brupdate_b2_target_offset, // @[issue-slot.scala:73:14] input io_kill, // @[issue-slot.scala:73:14] input io_clear, // @[issue-slot.scala:73:14] input io_ldspec_miss, // @[issue-slot.scala:73:14] input io_wakeup_ports_0_valid, // @[issue-slot.scala:73:14] input [5:0] io_wakeup_ports_0_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_0_bits_poisoned, // @[issue-slot.scala:73:14] input io_wakeup_ports_1_valid, // @[issue-slot.scala:73:14] input [5:0] io_wakeup_ports_1_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_1_bits_poisoned, // @[issue-slot.scala:73:14] input io_wakeup_ports_2_valid, // @[issue-slot.scala:73:14] input [5:0] io_wakeup_ports_2_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_2_bits_poisoned, // @[issue-slot.scala:73:14] input io_spec_ld_wakeup_0_valid, // @[issue-slot.scala:73:14] input 
[5:0] io_spec_ld_wakeup_0_bits, // @[issue-slot.scala:73:14] input io_in_uop_valid, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_uopc, // @[issue-slot.scala:73:14] input [31:0] io_in_uop_bits_inst, // @[issue-slot.scala:73:14] input [31:0] io_in_uop_bits_debug_inst, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_rvc, // @[issue-slot.scala:73:14] input [39:0] io_in_uop_bits_debug_pc, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_iq_type, // @[issue-slot.scala:73:14] input [9:0] io_in_uop_bits_fu_code, // @[issue-slot.scala:73:14] input [3:0] io_in_uop_bits_ctrl_br_type, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_ctrl_op1_sel, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_ctrl_op2_sel, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_ctrl_imm_sel, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_ctrl_op_fcn, // @[issue-slot.scala:73:14] input io_in_uop_bits_ctrl_fcn_dw, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_ctrl_csr_cmd, // @[issue-slot.scala:73:14] input io_in_uop_bits_ctrl_is_load, // @[issue-slot.scala:73:14] input io_in_uop_bits_ctrl_is_sta, // @[issue-slot.scala:73:14] input io_in_uop_bits_ctrl_is_std, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_iw_state, // @[issue-slot.scala:73:14] input io_in_uop_bits_iw_p1_poisoned, // @[issue-slot.scala:73:14] input io_in_uop_bits_iw_p2_poisoned, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_br, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_jalr, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_jal, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_sfb, // @[issue-slot.scala:73:14] input [7:0] io_in_uop_bits_br_mask, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_br_tag, // @[issue-slot.scala:73:14] input [3:0] io_in_uop_bits_ftq_idx, // @[issue-slot.scala:73:14] input io_in_uop_bits_edge_inst, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_pc_lob, // @[issue-slot.scala:73:14] input io_in_uop_bits_taken, // @[issue-slot.scala:73:14] input [19:0] io_in_uop_bits_imm_packed, // @[issue-slot.scala:73:14] input [11:0] io_in_uop_bits_csr_addr, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_rob_idx, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_ldq_idx, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_stq_idx, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_rxq_idx, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_pdst, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_prs1, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_prs2, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_prs3, // @[issue-slot.scala:73:14] input io_in_uop_bits_prs1_busy, // @[issue-slot.scala:73:14] input io_in_uop_bits_prs2_busy, // @[issue-slot.scala:73:14] input io_in_uop_bits_prs3_busy, // @[issue-slot.scala:73:14] input io_in_uop_bits_ppred_busy, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_stale_pdst, // @[issue-slot.scala:73:14] input io_in_uop_bits_exception, // @[issue-slot.scala:73:14] input [63:0] io_in_uop_bits_exc_cause, // @[issue-slot.scala:73:14] input io_in_uop_bits_bypassable, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_mem_cmd, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_mem_size, // @[issue-slot.scala:73:14] input io_in_uop_bits_mem_signed, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_fence, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_fencei, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_amo, // 
@[issue-slot.scala:73:14] input io_in_uop_bits_uses_ldq, // @[issue-slot.scala:73:14] input io_in_uop_bits_uses_stq, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_sys_pc2epc, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_unique, // @[issue-slot.scala:73:14] input io_in_uop_bits_flush_on_commit, // @[issue-slot.scala:73:14] input io_in_uop_bits_ldst_is_rs1, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_ldst, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_lrs1, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_lrs2, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_lrs3, // @[issue-slot.scala:73:14] input io_in_uop_bits_ldst_val, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_dst_rtype, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_lrs1_rtype, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_lrs2_rtype, // @[issue-slot.scala:73:14] input io_in_uop_bits_frs3_en, // @[issue-slot.scala:73:14] input io_in_uop_bits_fp_val, // @[issue-slot.scala:73:14] input io_in_uop_bits_fp_single, // @[issue-slot.scala:73:14] input io_in_uop_bits_xcpt_pf_if, // @[issue-slot.scala:73:14] input io_in_uop_bits_xcpt_ae_if, // @[issue-slot.scala:73:14] input io_in_uop_bits_xcpt_ma_if, // @[issue-slot.scala:73:14] input io_in_uop_bits_bp_debug_if, // @[issue-slot.scala:73:14] input io_in_uop_bits_bp_xcpt_if, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_debug_fsrc, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_debug_tsrc, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_uopc, // @[issue-slot.scala:73:14] output [31:0] io_out_uop_inst, // @[issue-slot.scala:73:14] output [31:0] io_out_uop_debug_inst, // @[issue-slot.scala:73:14] output io_out_uop_is_rvc, // @[issue-slot.scala:73:14] output [39:0] io_out_uop_debug_pc, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_iq_type, // @[issue-slot.scala:73:14] output [9:0] io_out_uop_fu_code, // @[issue-slot.scala:73:14] output [3:0] io_out_uop_ctrl_br_type, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14] output io_out_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14] output io_out_uop_ctrl_is_load, // @[issue-slot.scala:73:14] output io_out_uop_ctrl_is_sta, // @[issue-slot.scala:73:14] output io_out_uop_ctrl_is_std, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_iw_state, // @[issue-slot.scala:73:14] output io_out_uop_iw_p1_poisoned, // @[issue-slot.scala:73:14] output io_out_uop_iw_p2_poisoned, // @[issue-slot.scala:73:14] output io_out_uop_is_br, // @[issue-slot.scala:73:14] output io_out_uop_is_jalr, // @[issue-slot.scala:73:14] output io_out_uop_is_jal, // @[issue-slot.scala:73:14] output io_out_uop_is_sfb, // @[issue-slot.scala:73:14] output [7:0] io_out_uop_br_mask, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_br_tag, // @[issue-slot.scala:73:14] output [3:0] io_out_uop_ftq_idx, // @[issue-slot.scala:73:14] output io_out_uop_edge_inst, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_pc_lob, // @[issue-slot.scala:73:14] output io_out_uop_taken, // @[issue-slot.scala:73:14] output [19:0] io_out_uop_imm_packed, // @[issue-slot.scala:73:14] output [11:0] io_out_uop_csr_addr, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_rob_idx, // 
@[issue-slot.scala:73:14] output [2:0] io_out_uop_ldq_idx, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_stq_idx, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_rxq_idx, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_pdst, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_prs1, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_prs2, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_prs3, // @[issue-slot.scala:73:14] output [3:0] io_out_uop_ppred, // @[issue-slot.scala:73:14] output io_out_uop_prs1_busy, // @[issue-slot.scala:73:14] output io_out_uop_prs2_busy, // @[issue-slot.scala:73:14] output io_out_uop_prs3_busy, // @[issue-slot.scala:73:14] output io_out_uop_ppred_busy, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_stale_pdst, // @[issue-slot.scala:73:14] output io_out_uop_exception, // @[issue-slot.scala:73:14] output [63:0] io_out_uop_exc_cause, // @[issue-slot.scala:73:14] output io_out_uop_bypassable, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_mem_cmd, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_mem_size, // @[issue-slot.scala:73:14] output io_out_uop_mem_signed, // @[issue-slot.scala:73:14] output io_out_uop_is_fence, // @[issue-slot.scala:73:14] output io_out_uop_is_fencei, // @[issue-slot.scala:73:14] output io_out_uop_is_amo, // @[issue-slot.scala:73:14] output io_out_uop_uses_ldq, // @[issue-slot.scala:73:14] output io_out_uop_uses_stq, // @[issue-slot.scala:73:14] output io_out_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14] output io_out_uop_is_unique, // @[issue-slot.scala:73:14] output io_out_uop_flush_on_commit, // @[issue-slot.scala:73:14] output io_out_uop_ldst_is_rs1, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_ldst, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_lrs1, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_lrs2, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_lrs3, // @[issue-slot.scala:73:14] output io_out_uop_ldst_val, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_dst_rtype, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_lrs1_rtype, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_lrs2_rtype, // @[issue-slot.scala:73:14] output io_out_uop_frs3_en, // @[issue-slot.scala:73:14] output io_out_uop_fp_val, // @[issue-slot.scala:73:14] output io_out_uop_fp_single, // @[issue-slot.scala:73:14] output io_out_uop_xcpt_pf_if, // @[issue-slot.scala:73:14] output io_out_uop_xcpt_ae_if, // @[issue-slot.scala:73:14] output io_out_uop_xcpt_ma_if, // @[issue-slot.scala:73:14] output io_out_uop_bp_debug_if, // @[issue-slot.scala:73:14] output io_out_uop_bp_xcpt_if, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_debug_fsrc, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_debug_tsrc, // @[issue-slot.scala:73:14] output [6:0] io_uop_uopc, // @[issue-slot.scala:73:14] output [31:0] io_uop_inst, // @[issue-slot.scala:73:14] output [31:0] io_uop_debug_inst, // @[issue-slot.scala:73:14] output io_uop_is_rvc, // @[issue-slot.scala:73:14] output [39:0] io_uop_debug_pc, // @[issue-slot.scala:73:14] output [2:0] io_uop_iq_type, // @[issue-slot.scala:73:14] output [9:0] io_uop_fu_code, // @[issue-slot.scala:73:14] output [3:0] io_uop_ctrl_br_type, // @[issue-slot.scala:73:14] output [1:0] io_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14] output [2:0] io_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14] output [2:0] io_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14] output [4:0] io_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14] output io_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14] 
output [2:0] io_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14] output io_uop_ctrl_is_load, // @[issue-slot.scala:73:14] output io_uop_ctrl_is_sta, // @[issue-slot.scala:73:14] output io_uop_ctrl_is_std, // @[issue-slot.scala:73:14] output [1:0] io_uop_iw_state, // @[issue-slot.scala:73:14] output io_uop_iw_p1_poisoned, // @[issue-slot.scala:73:14] output io_uop_iw_p2_poisoned, // @[issue-slot.scala:73:14] output io_uop_is_br, // @[issue-slot.scala:73:14] output io_uop_is_jalr, // @[issue-slot.scala:73:14] output io_uop_is_jal, // @[issue-slot.scala:73:14] output io_uop_is_sfb, // @[issue-slot.scala:73:14] output [7:0] io_uop_br_mask, // @[issue-slot.scala:73:14] output [2:0] io_uop_br_tag, // @[issue-slot.scala:73:14] output [3:0] io_uop_ftq_idx, // @[issue-slot.scala:73:14] output io_uop_edge_inst, // @[issue-slot.scala:73:14] output [5:0] io_uop_pc_lob, // @[issue-slot.scala:73:14] output io_uop_taken, // @[issue-slot.scala:73:14] output [19:0] io_uop_imm_packed, // @[issue-slot.scala:73:14] output [11:0] io_uop_csr_addr, // @[issue-slot.scala:73:14] output [4:0] io_uop_rob_idx, // @[issue-slot.scala:73:14] output [2:0] io_uop_ldq_idx, // @[issue-slot.scala:73:14] output [2:0] io_uop_stq_idx, // @[issue-slot.scala:73:14] output [1:0] io_uop_rxq_idx, // @[issue-slot.scala:73:14] output [5:0] io_uop_pdst, // @[issue-slot.scala:73:14] output [5:0] io_uop_prs1, // @[issue-slot.scala:73:14] output [5:0] io_uop_prs2, // @[issue-slot.scala:73:14] output [5:0] io_uop_prs3, // @[issue-slot.scala:73:14] output [3:0] io_uop_ppred, // @[issue-slot.scala:73:14] output io_uop_prs1_busy, // @[issue-slot.scala:73:14] output io_uop_prs2_busy, // @[issue-slot.scala:73:14] output io_uop_prs3_busy, // @[issue-slot.scala:73:14] output io_uop_ppred_busy, // @[issue-slot.scala:73:14] output [5:0] io_uop_stale_pdst, // @[issue-slot.scala:73:14] output io_uop_exception, // @[issue-slot.scala:73:14] output [63:0] io_uop_exc_cause, // @[issue-slot.scala:73:14] output io_uop_bypassable, // @[issue-slot.scala:73:14] output [4:0] io_uop_mem_cmd, // @[issue-slot.scala:73:14] output [1:0] io_uop_mem_size, // @[issue-slot.scala:73:14] output io_uop_mem_signed, // @[issue-slot.scala:73:14] output io_uop_is_fence, // @[issue-slot.scala:73:14] output io_uop_is_fencei, // @[issue-slot.scala:73:14] output io_uop_is_amo, // @[issue-slot.scala:73:14] output io_uop_uses_ldq, // @[issue-slot.scala:73:14] output io_uop_uses_stq, // @[issue-slot.scala:73:14] output io_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14] output io_uop_is_unique, // @[issue-slot.scala:73:14] output io_uop_flush_on_commit, // @[issue-slot.scala:73:14] output io_uop_ldst_is_rs1, // @[issue-slot.scala:73:14] output [5:0] io_uop_ldst, // @[issue-slot.scala:73:14] output [5:0] io_uop_lrs1, // @[issue-slot.scala:73:14] output [5:0] io_uop_lrs2, // @[issue-slot.scala:73:14] output [5:0] io_uop_lrs3, // @[issue-slot.scala:73:14] output io_uop_ldst_val, // @[issue-slot.scala:73:14] output [1:0] io_uop_dst_rtype, // @[issue-slot.scala:73:14] output [1:0] io_uop_lrs1_rtype, // @[issue-slot.scala:73:14] output [1:0] io_uop_lrs2_rtype, // @[issue-slot.scala:73:14] output io_uop_frs3_en, // @[issue-slot.scala:73:14] output io_uop_fp_val, // @[issue-slot.scala:73:14] output io_uop_fp_single, // @[issue-slot.scala:73:14] output io_uop_xcpt_pf_if, // @[issue-slot.scala:73:14] output io_uop_xcpt_ae_if, // @[issue-slot.scala:73:14] output io_uop_xcpt_ma_if, // @[issue-slot.scala:73:14] output io_uop_bp_debug_if, // @[issue-slot.scala:73:14] output io_uop_bp_xcpt_if, // 
  // @[issue-slot.scala:73:14]
  output [1:0]  io_uop_debug_fsrc,  // @[issue-slot.scala:73:14]
  output [1:0]  io_uop_debug_tsrc,  // @[issue-slot.scala:73:14]
  output        io_debug_p1,        // @[issue-slot.scala:73:14]
  output        io_debug_p2,        // @[issue-slot.scala:73:14]
  output        io_debug_p3,        // @[issue-slot.scala:73:14]
  output        io_debug_ppred,     // @[issue-slot.scala:73:14]
  output [1:0]  io_debug_state      // @[issue-slot.scala:73:14]
);
  wire        io_grant_0 = io_grant; // @[issue-slot.scala:69:7]
  wire [7:0]  io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[issue-slot.scala:69:7]
  wire [7:0]  io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[issue-slot.scala:69:7]
  wire [6:0]  io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[issue-slot.scala:69:7]
  wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[issue-slot.scala:69:7]
  wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[issue-slot.scala:69:7]
  wire        io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[issue-slot.scala:69:7]
  wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[issue-slot.scala:69:7]
  wire [2:0]  io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[issue-slot.scala:69:7]
  wire [9:0]  io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[issue-slot.scala:69:7]
  wire [3:0]  io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[issue-slot.scala:69:7]
  wire [1:0]  io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7]
  wire [2:0]  io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7]
  wire [2:0]  io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7]
  wire [4:0]  io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7]
  wire        io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7]
  wire [2:0]  io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7]
  wire        io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[issue-slot.scala:69:7]
  wire        io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[issue-slot.scala:69:7]
  wire        io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[issue-slot.scala:69:7]
  wire [1:0]  io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[issue-slot.scala:69:7]
  wire        io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[issue-slot.scala:69:7]
  wire        io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[issue-slot.scala:69:7]
  wire        io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[issue-slot.scala:69:7]
  wire        io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[issue-slot.scala:69:7]
  wire        io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[issue-slot.scala:69:7]
  wire        io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[issue-slot.scala:69:7]
  wire [7:0]  io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[issue-slot.scala:69:7]
  wire [2:0]  io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[issue-slot.scala:69:7]
  wire [3:0]  io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[issue-slot.scala:69:7]
  wire        io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[issue-slot.scala:69:7]
  wire [5:0]  io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[issue-slot.scala:69:7]
  wire
io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[issue-slot.scala:69:7] wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[issue-slot.scala:69:7] wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[issue-slot.scala:69:7] wire [3:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[issue-slot.scala:69:7] wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // 
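  // Note: the *_0 nets in this region simply alias the module inputs (here the io_brupdate_*
  // branch-resolution payload), giving them the flattened `_0` naming used throughout the body.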
@[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[issue-slot.scala:69:7] wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[issue-slot.scala:69:7] wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[issue-slot.scala:69:7] wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[issue-slot.scala:69:7] wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[issue-slot.scala:69:7] wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[issue-slot.scala:69:7] wire io_kill_0 = io_kill; // @[issue-slot.scala:69:7] wire io_clear_0 = io_clear; // @[issue-slot.scala:69:7] wire io_ldspec_miss_0 = io_ldspec_miss; // @[issue-slot.scala:69:7] wire io_wakeup_ports_0_valid_0 = io_wakeup_ports_0_valid; // @[issue-slot.scala:69:7] wire [5:0] io_wakeup_ports_0_bits_pdst_0 = io_wakeup_ports_0_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_0_bits_poisoned_0 = io_wakeup_ports_0_bits_poisoned; // @[issue-slot.scala:69:7] wire io_wakeup_ports_1_valid_0 = io_wakeup_ports_1_valid; // @[issue-slot.scala:69:7] wire [5:0] io_wakeup_ports_1_bits_pdst_0 = io_wakeup_ports_1_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_1_bits_poisoned_0 = io_wakeup_ports_1_bits_poisoned; // @[issue-slot.scala:69:7] wire io_wakeup_ports_2_valid_0 = io_wakeup_ports_2_valid; // @[issue-slot.scala:69:7] wire [5:0] io_wakeup_ports_2_bits_pdst_0 = io_wakeup_ports_2_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_2_bits_poisoned_0 = io_wakeup_ports_2_bits_poisoned; // @[issue-slot.scala:69:7] wire io_spec_ld_wakeup_0_valid_0 = io_spec_ld_wakeup_0_valid; // @[issue-slot.scala:69:7] wire [5:0] io_spec_ld_wakeup_0_bits_0 = io_spec_ld_wakeup_0_bits; // @[issue-slot.scala:69:7] wire io_in_uop_valid_0 = io_in_uop_valid; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_uopc_0 = io_in_uop_bits_uopc; // @[issue-slot.scala:69:7] wire [31:0] io_in_uop_bits_inst_0 = io_in_uop_bits_inst; // @[issue-slot.scala:69:7] wire [31:0] io_in_uop_bits_debug_inst_0 = io_in_uop_bits_debug_inst; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_rvc_0 = 
io_in_uop_bits_is_rvc; // @[issue-slot.scala:69:7] wire [39:0] io_in_uop_bits_debug_pc_0 = io_in_uop_bits_debug_pc; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_iq_type_0 = io_in_uop_bits_iq_type; // @[issue-slot.scala:69:7] wire [9:0] io_in_uop_bits_fu_code_0 = io_in_uop_bits_fu_code; // @[issue-slot.scala:69:7] wire [3:0] io_in_uop_bits_ctrl_br_type_0 = io_in_uop_bits_ctrl_br_type; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_ctrl_op1_sel_0 = io_in_uop_bits_ctrl_op1_sel; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_ctrl_op2_sel_0 = io_in_uop_bits_ctrl_op2_sel; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_ctrl_imm_sel_0 = io_in_uop_bits_ctrl_imm_sel; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_ctrl_op_fcn_0 = io_in_uop_bits_ctrl_op_fcn; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ctrl_fcn_dw_0 = io_in_uop_bits_ctrl_fcn_dw; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_ctrl_csr_cmd_0 = io_in_uop_bits_ctrl_csr_cmd; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ctrl_is_load_0 = io_in_uop_bits_ctrl_is_load; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ctrl_is_sta_0 = io_in_uop_bits_ctrl_is_sta; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ctrl_is_std_0 = io_in_uop_bits_ctrl_is_std; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_iw_state_0 = io_in_uop_bits_iw_state; // @[issue-slot.scala:69:7] wire io_in_uop_bits_iw_p1_poisoned_0 = io_in_uop_bits_iw_p1_poisoned; // @[issue-slot.scala:69:7] wire io_in_uop_bits_iw_p2_poisoned_0 = io_in_uop_bits_iw_p2_poisoned; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_br_0 = io_in_uop_bits_is_br; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_jalr_0 = io_in_uop_bits_is_jalr; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_jal_0 = io_in_uop_bits_is_jal; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_sfb_0 = io_in_uop_bits_is_sfb; // @[issue-slot.scala:69:7] wire [7:0] io_in_uop_bits_br_mask_0 = io_in_uop_bits_br_mask; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_br_tag_0 = io_in_uop_bits_br_tag; // @[issue-slot.scala:69:7] wire [3:0] io_in_uop_bits_ftq_idx_0 = io_in_uop_bits_ftq_idx; // @[issue-slot.scala:69:7] wire io_in_uop_bits_edge_inst_0 = io_in_uop_bits_edge_inst; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_pc_lob_0 = io_in_uop_bits_pc_lob; // @[issue-slot.scala:69:7] wire io_in_uop_bits_taken_0 = io_in_uop_bits_taken; // @[issue-slot.scala:69:7] wire [19:0] io_in_uop_bits_imm_packed_0 = io_in_uop_bits_imm_packed; // @[issue-slot.scala:69:7] wire [11:0] io_in_uop_bits_csr_addr_0 = io_in_uop_bits_csr_addr; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_rob_idx_0 = io_in_uop_bits_rob_idx; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_ldq_idx_0 = io_in_uop_bits_ldq_idx; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_stq_idx_0 = io_in_uop_bits_stq_idx; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_rxq_idx_0 = io_in_uop_bits_rxq_idx; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_pdst_0 = io_in_uop_bits_pdst; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_prs1_0 = io_in_uop_bits_prs1; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_prs2_0 = io_in_uop_bits_prs2; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_prs3_0 = io_in_uop_bits_prs3; // @[issue-slot.scala:69:7] wire io_in_uop_bits_prs1_busy_0 = io_in_uop_bits_prs1_busy; // @[issue-slot.scala:69:7] wire io_in_uop_bits_prs2_busy_0 = io_in_uop_bits_prs2_busy; // @[issue-slot.scala:69:7] wire io_in_uop_bits_prs3_busy_0 = io_in_uop_bits_prs3_busy; 
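  // Likewise, the io_in_uop_bits_*_0 nets alias the incoming micro-op from dispatch;
  // they feed the next_uop_* muxes defined further down in the module.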
// @[issue-slot.scala:69:7] wire io_in_uop_bits_ppred_busy_0 = io_in_uop_bits_ppred_busy; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_stale_pdst_0 = io_in_uop_bits_stale_pdst; // @[issue-slot.scala:69:7] wire io_in_uop_bits_exception_0 = io_in_uop_bits_exception; // @[issue-slot.scala:69:7] wire [63:0] io_in_uop_bits_exc_cause_0 = io_in_uop_bits_exc_cause; // @[issue-slot.scala:69:7] wire io_in_uop_bits_bypassable_0 = io_in_uop_bits_bypassable; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_mem_cmd_0 = io_in_uop_bits_mem_cmd; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_mem_size_0 = io_in_uop_bits_mem_size; // @[issue-slot.scala:69:7] wire io_in_uop_bits_mem_signed_0 = io_in_uop_bits_mem_signed; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_fence_0 = io_in_uop_bits_is_fence; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_fencei_0 = io_in_uop_bits_is_fencei; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_amo_0 = io_in_uop_bits_is_amo; // @[issue-slot.scala:69:7] wire io_in_uop_bits_uses_ldq_0 = io_in_uop_bits_uses_ldq; // @[issue-slot.scala:69:7] wire io_in_uop_bits_uses_stq_0 = io_in_uop_bits_uses_stq; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_sys_pc2epc_0 = io_in_uop_bits_is_sys_pc2epc; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_unique_0 = io_in_uop_bits_is_unique; // @[issue-slot.scala:69:7] wire io_in_uop_bits_flush_on_commit_0 = io_in_uop_bits_flush_on_commit; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ldst_is_rs1_0 = io_in_uop_bits_ldst_is_rs1; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_ldst_0 = io_in_uop_bits_ldst; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_lrs1_0 = io_in_uop_bits_lrs1; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_lrs2_0 = io_in_uop_bits_lrs2; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_lrs3_0 = io_in_uop_bits_lrs3; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ldst_val_0 = io_in_uop_bits_ldst_val; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_dst_rtype_0 = io_in_uop_bits_dst_rtype; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_lrs1_rtype_0 = io_in_uop_bits_lrs1_rtype; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_lrs2_rtype_0 = io_in_uop_bits_lrs2_rtype; // @[issue-slot.scala:69:7] wire io_in_uop_bits_frs3_en_0 = io_in_uop_bits_frs3_en; // @[issue-slot.scala:69:7] wire io_in_uop_bits_fp_val_0 = io_in_uop_bits_fp_val; // @[issue-slot.scala:69:7] wire io_in_uop_bits_fp_single_0 = io_in_uop_bits_fp_single; // @[issue-slot.scala:69:7] wire io_in_uop_bits_xcpt_pf_if_0 = io_in_uop_bits_xcpt_pf_if; // @[issue-slot.scala:69:7] wire io_in_uop_bits_xcpt_ae_if_0 = io_in_uop_bits_xcpt_ae_if; // @[issue-slot.scala:69:7] wire io_in_uop_bits_xcpt_ma_if_0 = io_in_uop_bits_xcpt_ma_if; // @[issue-slot.scala:69:7] wire io_in_uop_bits_bp_debug_if_0 = io_in_uop_bits_bp_debug_if; // @[issue-slot.scala:69:7] wire io_in_uop_bits_bp_xcpt_if_0 = io_in_uop_bits_bp_xcpt_if; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_debug_fsrc_0 = io_in_uop_bits_debug_fsrc; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_debug_tsrc_0 = io_in_uop_bits_debug_tsrc; // @[issue-slot.scala:69:7] wire [3:0] io_pred_wakeup_port_bits = 4'h0; // @[issue-slot.scala:69:7] wire [3:0] io_in_uop_bits_ppred = 4'h0; // @[issue-slot.scala:69:7] wire [3:0] slot_uop_uop_ctrl_br_type = 4'h0; // @[consts.scala:269:19] wire [3:0] slot_uop_uop_ftq_idx = 4'h0; // @[consts.scala:269:19] wire [3:0] slot_uop_uop_ppred = 4'h0; // @[consts.scala:269:19] wire [3:0] slot_uop_cs_br_type = 4'h0; // 
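  // The slot_uop_uop_* / slot_uop_cs_* nets tied to constants here come from the default
  // micro-op value in consts.scala (presumably NullMicroOp); this slot configuration never
  // reads them, so the compiler has folded them to literals.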
@[consts.scala:279:18] wire io_pred_wakeup_port_valid = 1'h0; // @[issue-slot.scala:69:7] wire slot_uop_uop_is_rvc = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ctrl_fcn_dw = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ctrl_is_load = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ctrl_is_sta = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ctrl_is_std = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_iw_p1_poisoned = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_iw_p2_poisoned = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_br = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_jalr = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_jal = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_sfb = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_edge_inst = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_taken = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_prs1_busy = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_prs2_busy = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_prs3_busy = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ppred_busy = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_exception = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_bypassable = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_mem_signed = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_fence = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_fencei = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_amo = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_uses_ldq = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_uses_stq = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_sys_pc2epc = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_unique = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_flush_on_commit = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ldst_is_rs1 = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ldst_val = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_frs3_en = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_fp_val = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_fp_single = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_xcpt_pf_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_xcpt_ae_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_xcpt_ma_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_bp_debug_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_bp_xcpt_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_cs_fcn_dw = 1'h0; // @[consts.scala:279:18] wire slot_uop_cs_is_load = 1'h0; // @[consts.scala:279:18] wire slot_uop_cs_is_sta = 1'h0; // @[consts.scala:279:18] wire slot_uop_cs_is_std = 1'h0; // @[consts.scala:279:18] wire [2:0] slot_uop_uop_iq_type = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_uop_ctrl_op2_sel = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_uop_ctrl_imm_sel = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_uop_ctrl_csr_cmd = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_uop_br_tag = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_uop_ldq_idx = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_uop_stq_idx = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_cs_op2_sel = 3'h0; // @[consts.scala:279:18] wire [2:0] slot_uop_cs_imm_sel = 3'h0; // @[consts.scala:279:18] wire [2:0] slot_uop_cs_csr_cmd = 3'h0; // @[consts.scala:279:18] wire [4:0] slot_uop_uop_ctrl_op_fcn = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_rob_idx = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_mem_cmd = 5'h0; // 
@[consts.scala:269:19] wire [4:0] slot_uop_cs_op_fcn = 5'h0; // @[consts.scala:279:18] wire [1:0] slot_uop_uop_ctrl_op1_sel = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_iw_state = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_rxq_idx = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_mem_size = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_lrs1_rtype = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_lrs2_rtype = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_debug_fsrc = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_debug_tsrc = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_cs_op1_sel = 2'h0; // @[consts.scala:279:18] wire [1:0] slot_uop_uop_dst_rtype = 2'h2; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_pc_lob = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_pdst = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_prs1 = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_prs2 = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_prs3 = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_stale_pdst = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_ldst = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_lrs1 = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_lrs2 = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_lrs3 = 6'h0; // @[consts.scala:269:19] wire [63:0] slot_uop_uop_exc_cause = 64'h0; // @[consts.scala:269:19] wire [11:0] slot_uop_uop_csr_addr = 12'h0; // @[consts.scala:269:19] wire [19:0] slot_uop_uop_imm_packed = 20'h0; // @[consts.scala:269:19] wire [7:0] slot_uop_uop_br_mask = 8'h0; // @[consts.scala:269:19] wire [9:0] slot_uop_uop_fu_code = 10'h0; // @[consts.scala:269:19] wire [39:0] slot_uop_uop_debug_pc = 40'h0; // @[consts.scala:269:19] wire [31:0] slot_uop_uop_inst = 32'h0; // @[consts.scala:269:19] wire [31:0] slot_uop_uop_debug_inst = 32'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_uopc = 7'h0; // @[consts.scala:269:19] wire _io_valid_T; // @[issue-slot.scala:79:24] wire _io_will_be_valid_T_4; // @[issue-slot.scala:262:32] wire _io_request_hp_T; // @[issue-slot.scala:243:31] wire [6:0] next_uopc; // @[issue-slot.scala:82:29] wire [1:0] next_state; // @[issue-slot.scala:81:29] wire [7:0] next_br_mask; // @[util.scala:85:25] wire _io_out_uop_prs1_busy_T; // @[issue-slot.scala:270:28] wire _io_out_uop_prs2_busy_T; // @[issue-slot.scala:271:28] wire _io_out_uop_prs3_busy_T; // @[issue-slot.scala:272:28] wire _io_out_uop_ppred_busy_T; // @[issue-slot.scala:273:28] wire [1:0] next_lrs1_rtype; // @[issue-slot.scala:83:29] wire [1:0] next_lrs2_rtype; // @[issue-slot.scala:84:29] wire [3:0] io_out_uop_ctrl_br_type_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_ctrl_op1_sel_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_ctrl_op2_sel_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_ctrl_imm_sel_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_ctrl_op_fcn_0; // @[issue-slot.scala:69:7] wire io_out_uop_ctrl_fcn_dw_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_ctrl_csr_cmd_0; // @[issue-slot.scala:69:7] wire io_out_uop_ctrl_is_load_0; // @[issue-slot.scala:69:7] wire io_out_uop_ctrl_is_sta_0; // @[issue-slot.scala:69:7] wire io_out_uop_ctrl_is_std_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_uopc_0; // @[issue-slot.scala:69:7] wire [31:0] io_out_uop_inst_0; // @[issue-slot.scala:69:7] wire [31:0] io_out_uop_debug_inst_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_rvc_0; // @[issue-slot.scala:69:7] wire 
[39:0] io_out_uop_debug_pc_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_iq_type_0; // @[issue-slot.scala:69:7] wire [9:0] io_out_uop_fu_code_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_iw_state_0; // @[issue-slot.scala:69:7] wire io_out_uop_iw_p1_poisoned_0; // @[issue-slot.scala:69:7] wire io_out_uop_iw_p2_poisoned_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_br_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_jalr_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_jal_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_sfb_0; // @[issue-slot.scala:69:7] wire [7:0] io_out_uop_br_mask_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_br_tag_0; // @[issue-slot.scala:69:7] wire [3:0] io_out_uop_ftq_idx_0; // @[issue-slot.scala:69:7] wire io_out_uop_edge_inst_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_pc_lob_0; // @[issue-slot.scala:69:7] wire io_out_uop_taken_0; // @[issue-slot.scala:69:7] wire [19:0] io_out_uop_imm_packed_0; // @[issue-slot.scala:69:7] wire [11:0] io_out_uop_csr_addr_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_rob_idx_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_ldq_idx_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_stq_idx_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_rxq_idx_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_pdst_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_prs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_prs2_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_prs3_0; // @[issue-slot.scala:69:7] wire [3:0] io_out_uop_ppred_0; // @[issue-slot.scala:69:7] wire io_out_uop_prs1_busy_0; // @[issue-slot.scala:69:7] wire io_out_uop_prs2_busy_0; // @[issue-slot.scala:69:7] wire io_out_uop_prs3_busy_0; // @[issue-slot.scala:69:7] wire io_out_uop_ppred_busy_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_stale_pdst_0; // @[issue-slot.scala:69:7] wire io_out_uop_exception_0; // @[issue-slot.scala:69:7] wire [63:0] io_out_uop_exc_cause_0; // @[issue-slot.scala:69:7] wire io_out_uop_bypassable_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_mem_cmd_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_mem_size_0; // @[issue-slot.scala:69:7] wire io_out_uop_mem_signed_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_fence_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_fencei_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_amo_0; // @[issue-slot.scala:69:7] wire io_out_uop_uses_ldq_0; // @[issue-slot.scala:69:7] wire io_out_uop_uses_stq_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_sys_pc2epc_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_unique_0; // @[issue-slot.scala:69:7] wire io_out_uop_flush_on_commit_0; // @[issue-slot.scala:69:7] wire io_out_uop_ldst_is_rs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_ldst_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_lrs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_lrs2_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_lrs3_0; // @[issue-slot.scala:69:7] wire io_out_uop_ldst_val_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_dst_rtype_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_lrs1_rtype_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_lrs2_rtype_0; // @[issue-slot.scala:69:7] wire io_out_uop_frs3_en_0; // @[issue-slot.scala:69:7] wire io_out_uop_fp_val_0; // @[issue-slot.scala:69:7] wire io_out_uop_fp_single_0; // @[issue-slot.scala:69:7] wire io_out_uop_xcpt_pf_if_0; // @[issue-slot.scala:69:7] wire io_out_uop_xcpt_ae_if_0; // @[issue-slot.scala:69:7] wire 
io_out_uop_xcpt_ma_if_0; // @[issue-slot.scala:69:7] wire io_out_uop_bp_debug_if_0; // @[issue-slot.scala:69:7] wire io_out_uop_bp_xcpt_if_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_debug_fsrc_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_debug_tsrc_0; // @[issue-slot.scala:69:7] wire [3:0] io_uop_ctrl_br_type_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_ctrl_op1_sel_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_ctrl_op2_sel_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_ctrl_imm_sel_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_ctrl_op_fcn_0; // @[issue-slot.scala:69:7] wire io_uop_ctrl_fcn_dw_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_ctrl_csr_cmd_0; // @[issue-slot.scala:69:7] wire io_uop_ctrl_is_load_0; // @[issue-slot.scala:69:7] wire io_uop_ctrl_is_sta_0; // @[issue-slot.scala:69:7] wire io_uop_ctrl_is_std_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_uopc_0; // @[issue-slot.scala:69:7] wire [31:0] io_uop_inst_0; // @[issue-slot.scala:69:7] wire [31:0] io_uop_debug_inst_0; // @[issue-slot.scala:69:7] wire io_uop_is_rvc_0; // @[issue-slot.scala:69:7] wire [39:0] io_uop_debug_pc_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_iq_type_0; // @[issue-slot.scala:69:7] wire [9:0] io_uop_fu_code_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_iw_state_0; // @[issue-slot.scala:69:7] wire io_uop_iw_p1_poisoned_0; // @[issue-slot.scala:69:7] wire io_uop_iw_p2_poisoned_0; // @[issue-slot.scala:69:7] wire io_uop_is_br_0; // @[issue-slot.scala:69:7] wire io_uop_is_jalr_0; // @[issue-slot.scala:69:7] wire io_uop_is_jal_0; // @[issue-slot.scala:69:7] wire io_uop_is_sfb_0; // @[issue-slot.scala:69:7] wire [7:0] io_uop_br_mask_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_br_tag_0; // @[issue-slot.scala:69:7] wire [3:0] io_uop_ftq_idx_0; // @[issue-slot.scala:69:7] wire io_uop_edge_inst_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_pc_lob_0; // @[issue-slot.scala:69:7] wire io_uop_taken_0; // @[issue-slot.scala:69:7] wire [19:0] io_uop_imm_packed_0; // @[issue-slot.scala:69:7] wire [11:0] io_uop_csr_addr_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_rob_idx_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_ldq_idx_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_stq_idx_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_rxq_idx_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_pdst_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_prs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_prs2_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_prs3_0; // @[issue-slot.scala:69:7] wire [3:0] io_uop_ppred_0; // @[issue-slot.scala:69:7] wire io_uop_prs1_busy_0; // @[issue-slot.scala:69:7] wire io_uop_prs2_busy_0; // @[issue-slot.scala:69:7] wire io_uop_prs3_busy_0; // @[issue-slot.scala:69:7] wire io_uop_ppred_busy_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_stale_pdst_0; // @[issue-slot.scala:69:7] wire io_uop_exception_0; // @[issue-slot.scala:69:7] wire [63:0] io_uop_exc_cause_0; // @[issue-slot.scala:69:7] wire io_uop_bypassable_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_mem_cmd_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_mem_size_0; // @[issue-slot.scala:69:7] wire io_uop_mem_signed_0; // @[issue-slot.scala:69:7] wire io_uop_is_fence_0; // @[issue-slot.scala:69:7] wire io_uop_is_fencei_0; // @[issue-slot.scala:69:7] wire io_uop_is_amo_0; // @[issue-slot.scala:69:7] wire io_uop_uses_ldq_0; // @[issue-slot.scala:69:7] wire io_uop_uses_stq_0; // @[issue-slot.scala:69:7] wire io_uop_is_sys_pc2epc_0; // @[issue-slot.scala:69:7] wire io_uop_is_unique_0; 
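  // The undriven io_out_uop_*_0 and io_uop_*_0 wires in this region are forward declarations
  // of the module outputs; they are driven below, either directly from the slot_uop_* registers
  // or from the next_* update logic.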
// @[issue-slot.scala:69:7] wire io_uop_flush_on_commit_0; // @[issue-slot.scala:69:7] wire io_uop_ldst_is_rs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_ldst_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_lrs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_lrs2_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_lrs3_0; // @[issue-slot.scala:69:7] wire io_uop_ldst_val_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_dst_rtype_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_lrs1_rtype_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_lrs2_rtype_0; // @[issue-slot.scala:69:7] wire io_uop_frs3_en_0; // @[issue-slot.scala:69:7] wire io_uop_fp_val_0; // @[issue-slot.scala:69:7] wire io_uop_fp_single_0; // @[issue-slot.scala:69:7] wire io_uop_xcpt_pf_if_0; // @[issue-slot.scala:69:7] wire io_uop_xcpt_ae_if_0; // @[issue-slot.scala:69:7] wire io_uop_xcpt_ma_if_0; // @[issue-slot.scala:69:7] wire io_uop_bp_debug_if_0; // @[issue-slot.scala:69:7] wire io_uop_bp_xcpt_if_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_debug_fsrc_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_debug_tsrc_0; // @[issue-slot.scala:69:7] wire io_debug_p1_0; // @[issue-slot.scala:69:7] wire io_debug_p2_0; // @[issue-slot.scala:69:7] wire io_debug_p3_0; // @[issue-slot.scala:69:7] wire io_debug_ppred_0; // @[issue-slot.scala:69:7] wire [1:0] io_debug_state_0; // @[issue-slot.scala:69:7] wire io_valid_0; // @[issue-slot.scala:69:7] wire io_will_be_valid_0; // @[issue-slot.scala:69:7] wire io_request_0; // @[issue-slot.scala:69:7] wire io_request_hp_0; // @[issue-slot.scala:69:7] assign io_out_uop_iw_state_0 = next_state; // @[issue-slot.scala:69:7, :81:29] assign io_out_uop_uopc_0 = next_uopc; // @[issue-slot.scala:69:7, :82:29] assign io_out_uop_lrs1_rtype_0 = next_lrs1_rtype; // @[issue-slot.scala:69:7, :83:29] assign io_out_uop_lrs2_rtype_0 = next_lrs2_rtype; // @[issue-slot.scala:69:7, :84:29] reg [1:0] state; // @[issue-slot.scala:86:22] assign io_debug_state_0 = state; // @[issue-slot.scala:69:7, :86:22] reg p1; // @[issue-slot.scala:87:22] assign io_debug_p1_0 = p1; // @[issue-slot.scala:69:7, :87:22] wire next_p1 = p1; // @[issue-slot.scala:87:22, :163:25] reg p2; // @[issue-slot.scala:88:22] assign io_debug_p2_0 = p2; // @[issue-slot.scala:69:7, :88:22] wire next_p2 = p2; // @[issue-slot.scala:88:22, :164:25] reg p3; // @[issue-slot.scala:89:22] assign io_debug_p3_0 = p3; // @[issue-slot.scala:69:7, :89:22] wire next_p3 = p3; // @[issue-slot.scala:89:22, :165:25] reg ppred; // @[issue-slot.scala:90:22] assign io_debug_ppred_0 = ppred; // @[issue-slot.scala:69:7, :90:22] wire next_ppred = ppred; // @[issue-slot.scala:90:22, :166:28] reg p1_poisoned; // @[issue-slot.scala:95:28] assign io_out_uop_iw_p1_poisoned_0 = p1_poisoned; // @[issue-slot.scala:69:7, :95:28] assign io_uop_iw_p1_poisoned_0 = p1_poisoned; // @[issue-slot.scala:69:7, :95:28] reg p2_poisoned; // @[issue-slot.scala:96:28] assign io_out_uop_iw_p2_poisoned_0 = p2_poisoned; // @[issue-slot.scala:69:7, :96:28] assign io_uop_iw_p2_poisoned_0 = p2_poisoned; // @[issue-slot.scala:69:7, :96:28] wire next_p1_poisoned = io_in_uop_valid_0 ? io_in_uop_bits_iw_p1_poisoned_0 : p1_poisoned; // @[issue-slot.scala:69:7, :95:28, :99:29] wire next_p2_poisoned = io_in_uop_valid_0 ? 
io_in_uop_bits_iw_p2_poisoned_0 : p2_poisoned; // @[issue-slot.scala:69:7, :96:28, :100:29] reg [6:0] slot_uop_uopc; // @[issue-slot.scala:102:25] reg [31:0] slot_uop_inst; // @[issue-slot.scala:102:25] assign io_out_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:69:7, :102:25] reg [31:0] slot_uop_debug_inst; // @[issue-slot.scala:102:25] assign io_out_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_rvc; // @[issue-slot.scala:102:25] assign io_out_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25] reg [39:0] slot_uop_debug_pc; // @[issue-slot.scala:102:25] assign io_out_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_iq_type; // @[issue-slot.scala:102:25] assign io_out_uop_iq_type_0 = slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25] assign io_uop_iq_type_0 = slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25] reg [9:0] slot_uop_fu_code; // @[issue-slot.scala:102:25] assign io_out_uop_fu_code_0 = slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25] assign io_uop_fu_code_0 = slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25] reg [3:0] slot_uop_ctrl_br_type; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_br_type_0 = slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_br_type_0 = slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_ctrl_op1_sel; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_op1_sel_0 = slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_op1_sel_0 = slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_ctrl_op2_sel; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_op2_sel_0 = slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_op2_sel_0 = slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_ctrl_imm_sel; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_imm_sel_0 = slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_imm_sel_0 = slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25] reg [4:0] slot_uop_ctrl_op_fcn; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_op_fcn_0 = slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_op_fcn_0 = slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_fcn_dw_0 = slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_fcn_dw_0 = slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_csr_cmd_0 = slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_csr_cmd_0 = slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ctrl_is_load; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_is_load_0 = slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_is_load_0 = slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ctrl_is_sta; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_is_sta_0 = 
slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_is_sta_0 = slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ctrl_is_std; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_is_std_0 = slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_is_std_0 = slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_iw_state; // @[issue-slot.scala:102:25] assign io_uop_iw_state_0 = slot_uop_iw_state; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_iw_p1_poisoned; // @[issue-slot.scala:102:25] reg slot_uop_iw_p2_poisoned; // @[issue-slot.scala:102:25] reg slot_uop_is_br; // @[issue-slot.scala:102:25] assign io_out_uop_is_br_0 = slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_br_0 = slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_jalr; // @[issue-slot.scala:102:25] assign io_out_uop_is_jalr_0 = slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_jalr_0 = slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_jal; // @[issue-slot.scala:102:25] assign io_out_uop_is_jal_0 = slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_jal_0 = slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_sfb; // @[issue-slot.scala:102:25] assign io_out_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25] reg [7:0] slot_uop_br_mask; // @[issue-slot.scala:102:25] assign io_uop_br_mask_0 = slot_uop_br_mask; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_br_tag; // @[issue-slot.scala:102:25] assign io_out_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25] assign io_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25] reg [3:0] slot_uop_ftq_idx; // @[issue-slot.scala:102:25] assign io_out_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_edge_inst; // @[issue-slot.scala:102:25] assign io_out_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_pc_lob; // @[issue-slot.scala:102:25] assign io_out_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25] assign io_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_taken; // @[issue-slot.scala:102:25] assign io_out_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:69:7, :102:25] assign io_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:69:7, :102:25] reg [19:0] slot_uop_imm_packed; // @[issue-slot.scala:102:25] assign io_out_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25] assign io_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25] reg [11:0] slot_uop_csr_addr; // @[issue-slot.scala:102:25] assign io_out_uop_csr_addr_0 = slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25] assign io_uop_csr_addr_0 = slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25] reg [4:0] slot_uop_rob_idx; // @[issue-slot.scala:102:25] assign io_out_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25] assign io_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_ldq_idx; // @[issue-slot.scala:102:25] assign io_out_uop_ldq_idx_0 = slot_uop_ldq_idx; // 
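  // Each slot_uop_* register holds one field of the micro-op currently occupying this issue
  // slot. It drives both io_out_uop_* (so the entry can be shifted into another slot of the
  // collapsing issue queue) and io_uop_* (the request presented to the issue-select logic).
  // The next_uop_* muxes below pick the incoming uop when io_in_uop_valid is set, otherwise
  // the held slot_uop value.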
@[issue-slot.scala:69:7, :102:25] assign io_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_stq_idx; // @[issue-slot.scala:102:25] assign io_out_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25] assign io_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_rxq_idx; // @[issue-slot.scala:102:25] assign io_out_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25] assign io_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_pdst; // @[issue-slot.scala:102:25] assign io_out_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_prs1; // @[issue-slot.scala:102:25] assign io_out_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25] assign io_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_prs2; // @[issue-slot.scala:102:25] assign io_out_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25] assign io_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_prs3; // @[issue-slot.scala:102:25] assign io_out_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25] assign io_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25] reg [3:0] slot_uop_ppred; // @[issue-slot.scala:102:25] assign io_out_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_prs1_busy; // @[issue-slot.scala:102:25] assign io_uop_prs1_busy_0 = slot_uop_prs1_busy; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_prs2_busy; // @[issue-slot.scala:102:25] assign io_uop_prs2_busy_0 = slot_uop_prs2_busy; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_prs3_busy; // @[issue-slot.scala:102:25] assign io_uop_prs3_busy_0 = slot_uop_prs3_busy; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ppred_busy; // @[issue-slot.scala:102:25] assign io_uop_ppred_busy_0 = slot_uop_ppred_busy; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_stale_pdst; // @[issue-slot.scala:102:25] assign io_out_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_exception; // @[issue-slot.scala:102:25] assign io_out_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:69:7, :102:25] assign io_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:69:7, :102:25] reg [63:0] slot_uop_exc_cause; // @[issue-slot.scala:102:25] assign io_out_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25] assign io_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_bypassable; // @[issue-slot.scala:102:25] assign io_out_uop_bypassable_0 = slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25] assign io_uop_bypassable_0 = slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25] reg [4:0] slot_uop_mem_cmd; // @[issue-slot.scala:102:25] assign io_out_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25] assign io_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_mem_size; // @[issue-slot.scala:102:25] assign io_out_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25] assign io_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:69:7, 
:102:25] reg slot_uop_mem_signed; // @[issue-slot.scala:102:25] assign io_out_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25] assign io_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_fence; // @[issue-slot.scala:102:25] assign io_out_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_fencei; // @[issue-slot.scala:102:25] assign io_out_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_amo; // @[issue-slot.scala:102:25] assign io_out_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_uses_ldq; // @[issue-slot.scala:102:25] assign io_out_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25] assign io_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_uses_stq; // @[issue-slot.scala:102:25] assign io_out_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25] assign io_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_sys_pc2epc; // @[issue-slot.scala:102:25] assign io_out_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_unique; // @[issue-slot.scala:102:25] assign io_out_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_flush_on_commit; // @[issue-slot.scala:102:25] assign io_out_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25] assign io_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ldst_is_rs1; // @[issue-slot.scala:102:25] assign io_out_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_ldst; // @[issue-slot.scala:102:25] assign io_out_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_lrs1; // @[issue-slot.scala:102:25] assign io_out_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25] assign io_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_lrs2; // @[issue-slot.scala:102:25] assign io_out_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25] assign io_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_lrs3; // @[issue-slot.scala:102:25] assign io_out_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25] assign io_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ldst_val; // @[issue-slot.scala:102:25] assign io_out_uop_ldst_val_0 = slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ldst_val_0 = slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_dst_rtype; // @[issue-slot.scala:102:25] assign io_out_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25] assign 
io_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_lrs1_rtype; // @[issue-slot.scala:102:25] reg [1:0] slot_uop_lrs2_rtype; // @[issue-slot.scala:102:25] reg slot_uop_frs3_en; // @[issue-slot.scala:102:25] assign io_out_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25] assign io_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_fp_val; // @[issue-slot.scala:102:25] assign io_out_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25] assign io_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_fp_single; // @[issue-slot.scala:102:25] assign io_out_uop_fp_single_0 = slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25] assign io_uop_fp_single_0 = slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_xcpt_pf_if; // @[issue-slot.scala:102:25] assign io_out_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25] assign io_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_xcpt_ae_if; // @[issue-slot.scala:102:25] assign io_out_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25] assign io_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_xcpt_ma_if; // @[issue-slot.scala:102:25] assign io_out_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25] assign io_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_bp_debug_if; // @[issue-slot.scala:102:25] assign io_out_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25] assign io_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_bp_xcpt_if; // @[issue-slot.scala:102:25] assign io_out_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25] assign io_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_debug_fsrc; // @[issue-slot.scala:102:25] assign io_out_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_debug_tsrc; // @[issue-slot.scala:102:25] assign io_out_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25] wire [6:0] next_uop_uopc = io_in_uop_valid_0 ? io_in_uop_bits_uopc_0 : slot_uop_uopc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [31:0] next_uop_inst = io_in_uop_valid_0 ? io_in_uop_bits_inst_0 : slot_uop_inst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [31:0] next_uop_debug_inst = io_in_uop_valid_0 ? io_in_uop_bits_debug_inst_0 : slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_rvc = io_in_uop_valid_0 ? io_in_uop_bits_is_rvc_0 : slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [39:0] next_uop_debug_pc = io_in_uop_valid_0 ? io_in_uop_bits_debug_pc_0 : slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [2:0] next_uop_iq_type = io_in_uop_valid_0 ? io_in_uop_bits_iq_type_0 : slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [9:0] next_uop_fu_code = io_in_uop_valid_0 ? io_in_uop_bits_fu_code_0 : slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [3:0] next_uop_ctrl_br_type = io_in_uop_valid_0 ? 
io_in_uop_bits_ctrl_br_type_0 : slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_ctrl_op1_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op1_sel_0 : slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [2:0] next_uop_ctrl_op2_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op2_sel_0 : slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [2:0] next_uop_ctrl_imm_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_imm_sel_0 : slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [4:0] next_uop_ctrl_op_fcn = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op_fcn_0 : slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ctrl_fcn_dw = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_fcn_dw_0 : slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [2:0] next_uop_ctrl_csr_cmd = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_csr_cmd_0 : slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ctrl_is_load = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_load_0 : slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ctrl_is_sta = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_sta_0 : slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ctrl_is_std = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_std_0 : slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_iw_state = io_in_uop_valid_0 ? io_in_uop_bits_iw_state_0 : slot_uop_iw_state; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_iw_p1_poisoned = io_in_uop_valid_0 ? io_in_uop_bits_iw_p1_poisoned_0 : slot_uop_iw_p1_poisoned; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_iw_p2_poisoned = io_in_uop_valid_0 ? io_in_uop_bits_iw_p2_poisoned_0 : slot_uop_iw_p2_poisoned; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_br = io_in_uop_valid_0 ? io_in_uop_bits_is_br_0 : slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_jalr = io_in_uop_valid_0 ? io_in_uop_bits_is_jalr_0 : slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_jal = io_in_uop_valid_0 ? io_in_uop_bits_is_jal_0 : slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_sfb = io_in_uop_valid_0 ? io_in_uop_bits_is_sfb_0 : slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [7:0] next_uop_br_mask = io_in_uop_valid_0 ? io_in_uop_bits_br_mask_0 : slot_uop_br_mask; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [2:0] next_uop_br_tag = io_in_uop_valid_0 ? io_in_uop_bits_br_tag_0 : slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [3:0] next_uop_ftq_idx = io_in_uop_valid_0 ? io_in_uop_bits_ftq_idx_0 : slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_edge_inst = io_in_uop_valid_0 ? io_in_uop_bits_edge_inst_0 : slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_pc_lob = io_in_uop_valid_0 ? io_in_uop_bits_pc_lob_0 : slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_taken = io_in_uop_valid_0 ? io_in_uop_bits_taken_0 : slot_uop_taken; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [19:0] next_uop_imm_packed = io_in_uop_valid_0 ? io_in_uop_bits_imm_packed_0 : slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [11:0] next_uop_csr_addr = io_in_uop_valid_0 ? 
io_in_uop_bits_csr_addr_0 : slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [4:0] next_uop_rob_idx = io_in_uop_valid_0 ? io_in_uop_bits_rob_idx_0 : slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [2:0] next_uop_ldq_idx = io_in_uop_valid_0 ? io_in_uop_bits_ldq_idx_0 : slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [2:0] next_uop_stq_idx = io_in_uop_valid_0 ? io_in_uop_bits_stq_idx_0 : slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_rxq_idx = io_in_uop_valid_0 ? io_in_uop_bits_rxq_idx_0 : slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_pdst = io_in_uop_valid_0 ? io_in_uop_bits_pdst_0 : slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_prs1 = io_in_uop_valid_0 ? io_in_uop_bits_prs1_0 : slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_prs2 = io_in_uop_valid_0 ? io_in_uop_bits_prs2_0 : slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_prs3 = io_in_uop_valid_0 ? io_in_uop_bits_prs3_0 : slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [3:0] next_uop_ppred = io_in_uop_valid_0 ? 4'h0 : slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_prs1_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs1_busy_0 : slot_uop_prs1_busy; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_prs2_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs2_busy_0 : slot_uop_prs2_busy; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_prs3_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs3_busy_0 : slot_uop_prs3_busy; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ppred_busy = io_in_uop_valid_0 ? io_in_uop_bits_ppred_busy_0 : slot_uop_ppred_busy; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_stale_pdst = io_in_uop_valid_0 ? io_in_uop_bits_stale_pdst_0 : slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_exception = io_in_uop_valid_0 ? io_in_uop_bits_exception_0 : slot_uop_exception; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [63:0] next_uop_exc_cause = io_in_uop_valid_0 ? io_in_uop_bits_exc_cause_0 : slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_bypassable = io_in_uop_valid_0 ? io_in_uop_bits_bypassable_0 : slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [4:0] next_uop_mem_cmd = io_in_uop_valid_0 ? io_in_uop_bits_mem_cmd_0 : slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_mem_size = io_in_uop_valid_0 ? io_in_uop_bits_mem_size_0 : slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_mem_signed = io_in_uop_valid_0 ? io_in_uop_bits_mem_signed_0 : slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_fence = io_in_uop_valid_0 ? io_in_uop_bits_is_fence_0 : slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_fencei = io_in_uop_valid_0 ? io_in_uop_bits_is_fencei_0 : slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_amo = io_in_uop_valid_0 ? io_in_uop_bits_is_amo_0 : slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_uses_ldq = io_in_uop_valid_0 ? io_in_uop_bits_uses_ldq_0 : slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_uses_stq = io_in_uop_valid_0 ? 
io_in_uop_bits_uses_stq_0 : slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_sys_pc2epc = io_in_uop_valid_0 ? io_in_uop_bits_is_sys_pc2epc_0 : slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_unique = io_in_uop_valid_0 ? io_in_uop_bits_is_unique_0 : slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_flush_on_commit = io_in_uop_valid_0 ? io_in_uop_bits_flush_on_commit_0 : slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ldst_is_rs1 = io_in_uop_valid_0 ? io_in_uop_bits_ldst_is_rs1_0 : slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_ldst = io_in_uop_valid_0 ? io_in_uop_bits_ldst_0 : slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_lrs1 = io_in_uop_valid_0 ? io_in_uop_bits_lrs1_0 : slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_lrs2 = io_in_uop_valid_0 ? io_in_uop_bits_lrs2_0 : slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_lrs3 = io_in_uop_valid_0 ? io_in_uop_bits_lrs3_0 : slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ldst_val = io_in_uop_valid_0 ? io_in_uop_bits_ldst_val_0 : slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_dst_rtype = io_in_uop_valid_0 ? io_in_uop_bits_dst_rtype_0 : slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_lrs1_rtype = io_in_uop_valid_0 ? io_in_uop_bits_lrs1_rtype_0 : slot_uop_lrs1_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_lrs2_rtype = io_in_uop_valid_0 ? io_in_uop_bits_lrs2_rtype_0 : slot_uop_lrs2_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_frs3_en = io_in_uop_valid_0 ? io_in_uop_bits_frs3_en_0 : slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_fp_val = io_in_uop_valid_0 ? io_in_uop_bits_fp_val_0 : slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_fp_single = io_in_uop_valid_0 ? io_in_uop_bits_fp_single_0 : slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_xcpt_pf_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_pf_if_0 : slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_xcpt_ae_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_ae_if_0 : slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_xcpt_ma_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_ma_if_0 : slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_bp_debug_if = io_in_uop_valid_0 ? io_in_uop_bits_bp_debug_if_0 : slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_bp_xcpt_if = io_in_uop_valid_0 ? io_in_uop_bits_bp_xcpt_if_0 : slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_debug_fsrc = io_in_uop_valid_0 ? io_in_uop_bits_debug_fsrc_0 : slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_debug_tsrc = io_in_uop_valid_0 ? 
io_in_uop_bits_debug_tsrc_0 : slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire _T_11 = state == 2'h2; // @[issue-slot.scala:86:22, :134:25] wire _T_7 = io_grant_0 & state == 2'h1 | io_grant_0 & _T_11 & p1 & p2 & ppred; // @[issue-slot.scala:69:7, :86:22, :87:22, :88:22, :90:22, :133:{26,36,52}, :134:{15,25,40,46,52}] wire _T_12 = io_grant_0 & _T_11; // @[issue-slot.scala:69:7, :134:25, :139:25] wire _T_14 = io_ldspec_miss_0 & (p1_poisoned | p2_poisoned); // @[issue-slot.scala:69:7, :95:28, :96:28, :140:{28,44}] wire _GEN = _T_12 & ~_T_14; // @[issue-slot.scala:126:14, :139:{25,51}, :140:{11,28,62}, :141:18] wire _GEN_0 = io_kill_0 | _T_7; // @[issue-slot.scala:69:7, :102:25, :131:18, :133:52, :134:63, :139:51] wire _GEN_1 = _GEN_0 | ~(_T_12 & ~_T_14 & p1); // @[issue-slot.scala:87:22, :102:25, :131:18, :134:63, :139:{25,51}, :140:{11,28,62}, :142:17, :143:23] assign next_uopc = _GEN_1 ? slot_uop_uopc : 7'h3; // @[issue-slot.scala:82:29, :102:25, :131:18, :134:63, :139:51] assign next_lrs1_rtype = _GEN_1 ? slot_uop_lrs1_rtype : 2'h2; // @[issue-slot.scala:83:29, :102:25, :131:18, :134:63, :139:51] wire _GEN_2 = _GEN_0 | ~_GEN | p1; // @[issue-slot.scala:87:22, :102:25, :126:14, :131:18, :134:63, :139:51, :140:62, :141:18, :142:17] assign next_lrs2_rtype = _GEN_2 ? slot_uop_lrs2_rtype : 2'h2; // @[issue-slot.scala:84:29, :102:25, :131:18, :134:63, :139:51, :140:62, :142:17] wire _p1_T = ~io_in_uop_bits_prs1_busy_0; // @[issue-slot.scala:69:7, :169:11] wire _p2_T = ~io_in_uop_bits_prs2_busy_0; // @[issue-slot.scala:69:7, :170:11] wire _p3_T = ~io_in_uop_bits_prs3_busy_0; // @[issue-slot.scala:69:7, :171:11] wire _ppred_T = ~io_in_uop_bits_ppred_busy_0; // @[issue-slot.scala:69:7, :172:14] wire _T_22 = io_ldspec_miss_0 & next_p1_poisoned; // @[issue-slot.scala:69:7, :99:29, :175:24] wire _T_27 = io_ldspec_miss_0 & next_p2_poisoned; // @[issue-slot.scala:69:7, :100:29, :179:24] wire _T_61 = io_spec_ld_wakeup_0_valid_0 & io_spec_ld_wakeup_0_bits_0 == next_uop_prs1 & next_uop_lrs1_rtype == 2'h0; // @[issue-slot.scala:69:7, :103:21, :209:38, :210:{33,51}, :211:27] wire _T_69 = io_spec_ld_wakeup_0_valid_0 & io_spec_ld_wakeup_0_bits_0 == next_uop_prs2 & next_uop_lrs2_rtype == 2'h0; // @[issue-slot.scala:69:7, :103:21, :216:38, :217:{33,51}, :218:27]
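The long run of per-field ternaries above (next_uop_* = io_in_uop_valid_0 ? io_in_uop_bits_* : slot_uop_*, all annotated issue-slot.scala:103:21) is what a single bundle-level Mux in the Chisel issue slot lowers to: FIRRTL expands one Mux over the micro-op bundle into a 2:1 mux per field. A minimal sketch of that pattern, assuming hypothetical MicroOpSketch/IssueSlotSketch names in place of the real BOOM types:

import chisel3._
import chisel3.util._

// Hypothetical stand-in for the real MicroOp bundle; only a few fields shown.
class MicroOpSketch extends Bundle {
  val uopc      = UInt(7.W)
  val inst      = UInt(32.W)
  val prs1_busy = Bool()
}

class IssueSlotSketch extends Module {
  val io = IO(new Bundle {
    val in_uop  = Flipped(Valid(new MicroOpSketch))
    val out_uop = Output(new MicroOpSketch)
  })
  val slot_uop = Reg(new MicroOpSketch)                        // micro-op held by the slot
  // One Mux over the whole bundle; the emitted Verilog has one ternary per field.
  val next_uop = Mux(io.in_uop.valid, io.in_uop.bits, slot_uop)
  slot_uop   := next_uop
  io.out_uop := slot_uop
}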
Generate the Verilog code corresponding to the following Chisel files.

File DescribedSRAM.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.

package freechips.rocketchip.util

import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil

object DescribedSRAM {
  def apply[T <: Data](
    name: String,
    desc: String,
    size: BigInt, // depth
    data: T
  ): SyncReadMem[T] = {
    val mem = SyncReadMem(size, data)
    mem.suggestName(name)
    val granWidth = data match {
      case v: Vec[_] => v.head.getWidth
      case d => d.getWidth
    }
    val uid = 0
    Annotated.srams(
      component = mem,
      name = name,
      address_width = log2Ceil(size),
      data_width = data.getWidth,
      depth = size,
      description = desc,
      write_mask_granularity = granWidth
    )
    mem
  }
}
module rockettile_icache_data_arrays_0( // @[DescribedSRAM.scala:17:26]
  input  [6:0]   RW0_addr,
  input          RW0_en,
  input          RW0_clk,
  input          RW0_wmode,
  input  [255:0] RW0_wdata,
  output [255:0] RW0_rdata,
  input  [7:0]   RW0_wmask
);

  rockettile_icache_data_arrays_0_ext rockettile_icache_data_arrays_0_ext ( // @[DescribedSRAM.scala:17:26]
    .RW0_addr  (RW0_addr),
    .RW0_en    (RW0_en),
    .RW0_clk   (RW0_clk),
    .RW0_wmode (RW0_wmode),
    .RW0_wdata (RW0_wdata),
    .RW0_rdata (RW0_rdata),
    .RW0_wmask (RW0_wmask)
  ); // @[DescribedSRAM.scala:17:26]
endmodule
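The wrapper above is what the SyncReadMem created by DescribedSRAM elaborates to once the memory is replaced by an external macro (the *_ext instance): one combined read/write port (RW0) with a 7-bit address (128 entries), 256-bit data, and an 8-bit write mask (one bit per 32-bit granule, matching write_mask_granularity). A sketch of Chisel that would describe a memory of this shape, with a hypothetical ICacheDataArraySketch wrapper and desc string standing in for the real rocket ICache code:

import chisel3._
import chisel3.util._
import freechips.rocketchip.util.DescribedSRAM

class ICacheDataArraySketch extends Module {
  val io = IO(new Bundle {
    val addr  = Input(UInt(7.W))
    val wen   = Input(Bool())
    val wdata = Input(Vec(8, UInt(32.W)))
    val wmask = Input(Vec(8, Bool()))
    val rdata = Output(Vec(8, UInt(32.W)))
  })
  // 128-deep, 8 x 32-bit (256-bit) SyncReadMem with a per-granule write mask.
  val dataArray = DescribedSRAM(
    name = "rockettile_icache_data_arrays_0",
    desc = "ICache data array (illustrative description)",
    size = 128,
    data = Vec(8, UInt(32.W))
  )
  when (io.wen) { dataArray.write(io.addr, io.wdata, io.wmask) }
  io.rdata := dataArray.read(io.addr, !io.wen)
}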
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, 
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
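Usage note (illustrative only; not one of the input files above and not reflected in the generated Verilog): the TLEdgeOut constructors defined in Edges.scala are normally invoked from a client's LazyModuleImp to assemble channel payloads. The sketch below assumes a recent rocket-chip import layout; the module name, client name, address, and transfer size are hypothetical, and the B/C/E tie-offs assume an uncached client.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._

// Hypothetical single-shot reader: issues one 8-byte Get and sinks the response.
class ExampleGetClient(implicit p: Parameters) extends LazyModule {
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(name = "exampleGet", sourceId = IdRange(0, 1))))))
  lazy val module = new LazyModuleImp(this) {
    val (out, edge) = node.out(0)
    val sent = RegInit(false.B)
    // edge.Get returns (legal, bits); legal reflects whether the visible managers support the access.
    val (legal, getBits) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 3.U)
    out.a.valid := legal && !sent
    out.a.bits  := getBits
    when (out.a.fire) { sent := true.B }
    out.d.ready := true.B       // sink the AccessAckData response
    // Tie off the TL-C channels, which this uncached client never drives.
    out.b.ready := true.B
    out.c.valid := false.B
    out.c.bits  := DontCare
    out.e.valid := false.B
    out.e.bits  := DontCare
  }
}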
module TLMonitor_68( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [1:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input io_in_b_ready, // @[Monitor.scala:20:14] input io_in_b_valid, // @[Monitor.scala:20:14] input [2:0] io_in_b_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_b_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_b_bits_size, // @[Monitor.scala:20:14] input [1:0] io_in_b_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_b_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_b_bits_mask, // @[Monitor.scala:20:14] input io_in_b_bits_corrupt, // @[Monitor.scala:20:14] input io_in_c_ready, // @[Monitor.scala:20:14] input io_in_c_valid, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_c_bits_size, // @[Monitor.scala:20:14] input [1:0] io_in_c_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_c_bits_address, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [4:0] io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt, // @[Monitor.scala:20:14] input io_in_e_ready, // @[Monitor.scala:20:14] input io_in_e_valid, // @[Monitor.scala:20:14] input [4:0] io_in_e_bits_sink // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire [26:0] _GEN = {23'h0, io_in_a_bits_size}; // @[package.scala:243:71] wire [26:0] _GEN_0 = {23'h0, io_in_c_bits_size}; // @[package.scala:243:71] wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg [8:0] a_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [3:0] size; // @[Monitor.scala:389:22] reg [1:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] wire _d_first_T_3 = io_in_d_ready & io_in_d_valid; // @[Decoupled.scala:51:35] reg [8:0] d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg [1:0] source_1; // @[Monitor.scala:541:22] reg [4:0] sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [8:0] b_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_2; // @[Monitor.scala:410:22] reg [1:0] param_2; // @[Monitor.scala:411:22] reg [3:0] size_2; // @[Monitor.scala:412:22] reg [1:0] source_2; // @[Monitor.scala:413:22] reg [31:0] address_1; // @[Monitor.scala:414:22] wire _c_first_T_1 = io_in_c_ready & io_in_c_valid; // @[Decoupled.scala:51:35] reg [8:0] c_first_counter; // @[Edges.scala:229:27] reg [2:0] 
opcode_3; // @[Monitor.scala:515:22] reg [2:0] param_3; // @[Monitor.scala:516:22] reg [3:0] size_3; // @[Monitor.scala:517:22] reg [1:0] source_3; // @[Monitor.scala:518:22] reg [31:0] address_2; // @[Monitor.scala:519:22] reg [2:0] inflight; // @[Monitor.scala:614:27] reg [11:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [23:0] inflight_sizes; // @[Monitor.scala:618:33] reg [8:0] a_first_counter_1; // @[Edges.scala:229:27] wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] reg [8:0] d_first_counter_1; // @[Edges.scala:229:27] wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire [3:0] _GEN_1 = {2'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35] wire _GEN_2 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35] wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46] wire _GEN_3 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74] wire [3:0] _GEN_4 = {2'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [2:0] inflight_1; // @[Monitor.scala:726:35] reg [23:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg [8:0] c_first_counter_1; // @[Edges.scala:229:27] wire c_first_1 = c_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] reg [8:0] d_first_counter_2; // @[Edges.scala:229:27] wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _GEN_5 = io_in_c_bits_opcode[2] & io_in_c_bits_opcode[1]; // @[Edges.scala:68:{36,40,51}] wire [3:0] _GEN_6 = {2'h0, io_in_c_bits_source}; // @[OneHot.scala:58:35] wire _GEN_7 = _c_first_T_1 & c_first_1 & _GEN_5; // @[Decoupled.scala:51:35] reg [31:0] watchdog_1; // @[Monitor.scala:818:27] reg [31:0] inflight_2; // @[Monitor.scala:828:27] reg [8:0] d_first_counter_3; // @[Edges.scala:229:27] wire d_first_3 = d_first_counter_3 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _GEN_8 = _d_first_T_3 & d_first_3 & io_in_d_bits_opcode[2] & ~(io_in_d_bits_opcode[1]); // @[Decoupled.scala:51:35] wire [31:0] _GEN_9 = {27'h0, io_in_d_bits_sink}; // @[OneHot.scala:58:35] wire [31:0] d_set = _GEN_8 ? 32'h1 << _GEN_9 : 32'h0; // @[OneHot.scala:58:35] wire _GEN_10 = io_in_e_ready & io_in_e_valid; // @[Decoupled.scala:51:35] wire [31:0] _GEN_11 = {27'h0, io_in_e_bits_sink}; // @[OneHot.scala:58:35]
Generate the Verilog code corresponding to the following Chisel files. File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. 
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. 
* * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. 
It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, 
DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. 
*/ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). 
[[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... // Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. 
*/ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. 
|$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. */ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. 
*/ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. */ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File UserYanker.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.amba.axi4 import chisel3._ import chisel3.util.{Queue, QueueIO, UIntToOH} import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp} import freechips.rocketchip.util.BundleMap /** This adapter prunes all user bit fields of the echo type from request messages, * storing them in queues and echoing them back when matching response messages are received. * * It also optionally rate limits the number of transactions that can be in flight simultaneously * per FIFO domain / A[W|R]ID. * * @param capMaxFlight is an optional maximum number of transactions that can be in flight per A[W|R]ID. */ class AXI4UserYanker(capMaxFlight: Option[Int] = None)(implicit p: Parameters) extends LazyModule { val node = AXI4AdapterNode( masterFn = { mp => mp.copy( masters = mp.masters.map { m => m.copy( maxFlight = (m.maxFlight, capMaxFlight) match { case (Some(x), Some(y)) => Some(x min y) case (Some(x), None) => Some(x) case (None, Some(y)) => Some(y) case (None, None) => None })}, echoFields = Nil)}, slaveFn = { sp => sp }) lazy val module = new Impl class Impl extends LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => // Which fields are we stripping? 
val echoFields = edgeIn.master.echoFields val need_bypass = edgeOut.slave.minLatency < 1 edgeOut.master.masters.foreach { m => require (m.maxFlight.isDefined, "UserYanker needs a flight cap on each ID") } def queue(id: Int) = { val depth = edgeOut.master.masters.find(_.id.contains(id)).flatMap(_.maxFlight).getOrElse(0) if (depth == 0) { Wire(new QueueIO(BundleMap(echoFields), 1)) // unused ID => undefined value } else { Module(new Queue(BundleMap(echoFields), depth, flow=need_bypass)).io } } val rqueues = Seq.tabulate(edgeIn.master.endId) { i => queue(i) } val wqueues = Seq.tabulate(edgeIn.master.endId) { i => queue(i) } val arid = in.ar.bits.id val ar_ready = VecInit(rqueues.map(_.enq.ready))(arid) in .ar.ready := out.ar.ready && ar_ready out.ar.valid := in .ar.valid && ar_ready Connectable.waiveUnmatched(out.ar.bits, in.ar.bits) match { case (lhs, rhs) => lhs :<= rhs } val rid = out.r.bits.id val r_valid = VecInit(rqueues.map(_.deq.valid))(rid) val r_bits = VecInit(rqueues.map(_.deq.bits))(rid) assert (!out.r.valid || r_valid) // Q must be ready faster than the response Connectable.waiveUnmatched(in.r, out.r) match { case (lhs, rhs) => lhs :<>= rhs } in.r.bits.echo :<= r_bits val arsel = UIntToOH(arid, edgeIn.master.endId).asBools val rsel = UIntToOH(rid, edgeIn.master.endId).asBools (rqueues zip (arsel zip rsel)) foreach { case (q, (ar, r)) => q.deq.ready := out.r .valid && in .r .ready && r && out.r.bits.last q.deq.valid := DontCare q.deq.bits := DontCare q.enq.valid := in .ar.valid && out.ar.ready && ar q.enq.ready := DontCare q.enq.bits :<>= in.ar.bits.echo q.count := DontCare } val awid = in.aw.bits.id val aw_ready = VecInit(wqueues.map(_.enq.ready))(awid) in .aw.ready := out.aw.ready && aw_ready out.aw.valid := in .aw.valid && aw_ready Connectable.waiveUnmatched(out.aw.bits, in.aw.bits) match { case (lhs, rhs) => lhs :<>= rhs } val bid = out.b.bits.id val b_valid = VecInit(wqueues.map(_.deq.valid))(bid) val b_bits = VecInit(wqueues.map(_.deq.bits))(bid) assert (!out.b.valid || b_valid) // Q must be ready faster than the response Connectable.waiveUnmatched(in.b, out.b) match { case (lhs, rhs) => lhs :<>= rhs } in.b.bits.echo :<>= b_bits val awsel = UIntToOH(awid, edgeIn.master.endId).asBools val bsel = UIntToOH(bid, edgeIn.master.endId).asBools (wqueues zip (awsel zip bsel)) foreach { case (q, (aw, b)) => q.deq.ready := out.b .valid && in .b .ready && b q.deq.valid := DontCare q.deq.bits := DontCare q.enq.valid := in .aw.valid && out.aw.ready && aw q.enq.ready := DontCare q.enq.bits :<>= in.aw.bits.echo q.count := DontCare } out.w :<>= in.w } } } object AXI4UserYanker { def apply(capMaxFlight: Option[Int] = None)(implicit p: Parameters): AXI4Node = { val axi4yank = LazyModule(new AXI4UserYanker(capMaxFlight)) axi4yank.node } }
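(Illustrative note, not part of the files quoted above: a short sketch of how AXI4UserYanker is usually dropped into a diplomatic graph through the companion object's apply. The node names `slaveDevice` and `masterDevice` are placeholders assumed only for this example.)

// Hypothetical wiring: strip/echo the user echo fields and cap in-flight
// transactions at 4 per AW/AR ID between a master and a slave.
slaveDevice.node :=
  AXI4UserYanker(capMaxFlight = Some(4)) :=
  masterDevice.node

Because masterFn sets echoFields = Nil, the downstream slave never sees the echo user bits; the per-ID queues in the Impl above are what reattach them to the matching B/R responses.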
module AXI4UserYanker( // @[UserYanker.scala:36:9] input clock, // @[UserYanker.scala:36:9] input reset, // @[UserYanker.scala:36:9] output auto_in_aw_ready, // @[LazyModuleImp.scala:107:25] input auto_in_aw_valid, // @[LazyModuleImp.scala:107:25] input [6:0] auto_in_aw_bits_id, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_aw_bits_addr, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_aw_bits_len, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_aw_bits_size, // @[LazyModuleImp.scala:107:25] input [1:0] auto_in_aw_bits_burst, // @[LazyModuleImp.scala:107:25] input auto_in_aw_bits_lock, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_aw_bits_cache, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_aw_bits_prot, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_aw_bits_qos, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_aw_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_aw_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25] input auto_in_aw_bits_echo_extra_id, // @[LazyModuleImp.scala:107:25] output auto_in_w_ready, // @[LazyModuleImp.scala:107:25] input auto_in_w_valid, // @[LazyModuleImp.scala:107:25] input [63:0] auto_in_w_bits_data, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_w_bits_strb, // @[LazyModuleImp.scala:107:25] input auto_in_w_bits_last, // @[LazyModuleImp.scala:107:25] input auto_in_b_ready, // @[LazyModuleImp.scala:107:25] output auto_in_b_valid, // @[LazyModuleImp.scala:107:25] output [6:0] auto_in_b_bits_id, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_b_bits_resp, // @[LazyModuleImp.scala:107:25] output [3:0] auto_in_b_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25] output [7:0] auto_in_b_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25] output auto_in_b_bits_echo_extra_id, // @[LazyModuleImp.scala:107:25] output auto_in_ar_ready, // @[LazyModuleImp.scala:107:25] input auto_in_ar_valid, // @[LazyModuleImp.scala:107:25] input [6:0] auto_in_ar_bits_id, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_ar_bits_addr, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_ar_bits_len, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_ar_bits_size, // @[LazyModuleImp.scala:107:25] input [1:0] auto_in_ar_bits_burst, // @[LazyModuleImp.scala:107:25] input auto_in_ar_bits_lock, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_ar_bits_cache, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_ar_bits_prot, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_ar_bits_qos, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_ar_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_ar_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25] input auto_in_ar_bits_echo_extra_id, // @[LazyModuleImp.scala:107:25] input auto_in_r_ready, // @[LazyModuleImp.scala:107:25] output auto_in_r_valid, // @[LazyModuleImp.scala:107:25] output [6:0] auto_in_r_bits_id, // @[LazyModuleImp.scala:107:25] output [63:0] auto_in_r_bits_data, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_r_bits_resp, // @[LazyModuleImp.scala:107:25] output [3:0] auto_in_r_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25] output [7:0] auto_in_r_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25] output auto_in_r_bits_echo_extra_id, // @[LazyModuleImp.scala:107:25] output auto_in_r_bits_last, // @[LazyModuleImp.scala:107:25] input auto_out_aw_ready, // @[LazyModuleImp.scala:107:25] output auto_out_aw_valid, // @[LazyModuleImp.scala:107:25] output [6:0] 
auto_out_aw_bits_id, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_aw_bits_addr, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_aw_bits_len, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_aw_bits_size, // @[LazyModuleImp.scala:107:25] output [1:0] auto_out_aw_bits_burst, // @[LazyModuleImp.scala:107:25] output auto_out_aw_bits_lock, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_aw_bits_cache, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_aw_bits_prot, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_aw_bits_qos, // @[LazyModuleImp.scala:107:25] input auto_out_w_ready, // @[LazyModuleImp.scala:107:25] output auto_out_w_valid, // @[LazyModuleImp.scala:107:25] output [63:0] auto_out_w_bits_data, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_w_bits_strb, // @[LazyModuleImp.scala:107:25] output auto_out_w_bits_last, // @[LazyModuleImp.scala:107:25] output auto_out_b_ready, // @[LazyModuleImp.scala:107:25] input auto_out_b_valid, // @[LazyModuleImp.scala:107:25] input [6:0] auto_out_b_bits_id, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_b_bits_resp, // @[LazyModuleImp.scala:107:25] input auto_out_ar_ready, // @[LazyModuleImp.scala:107:25] output auto_out_ar_valid, // @[LazyModuleImp.scala:107:25] output [6:0] auto_out_ar_bits_id, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_ar_bits_addr, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_ar_bits_len, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_ar_bits_size, // @[LazyModuleImp.scala:107:25] output [1:0] auto_out_ar_bits_burst, // @[LazyModuleImp.scala:107:25] output auto_out_ar_bits_lock, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_ar_bits_cache, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_ar_bits_prot, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_ar_bits_qos, // @[LazyModuleImp.scala:107:25] output auto_out_r_ready, // @[LazyModuleImp.scala:107:25] input auto_out_r_valid, // @[LazyModuleImp.scala:107:25] input [6:0] auto_out_r_bits_id, // @[LazyModuleImp.scala:107:25] input [63:0] auto_out_r_bits_data, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_r_bits_resp, // @[LazyModuleImp.scala:107:25] input auto_out_r_bits_last // @[LazyModuleImp.scala:107:25] ); wire auto_in_aw_valid_0 = auto_in_aw_valid; // @[UserYanker.scala:36:9] wire [6:0] auto_in_aw_bits_id_0 = auto_in_aw_bits_id; // @[UserYanker.scala:36:9] wire [31:0] auto_in_aw_bits_addr_0 = auto_in_aw_bits_addr; // @[UserYanker.scala:36:9] wire [7:0] auto_in_aw_bits_len_0 = auto_in_aw_bits_len; // @[UserYanker.scala:36:9] wire [2:0] auto_in_aw_bits_size_0 = auto_in_aw_bits_size; // @[UserYanker.scala:36:9] wire [1:0] auto_in_aw_bits_burst_0 = auto_in_aw_bits_burst; // @[UserYanker.scala:36:9] wire auto_in_aw_bits_lock_0 = auto_in_aw_bits_lock; // @[UserYanker.scala:36:9] wire [3:0] auto_in_aw_bits_cache_0 = auto_in_aw_bits_cache; // @[UserYanker.scala:36:9] wire [2:0] auto_in_aw_bits_prot_0 = auto_in_aw_bits_prot; // @[UserYanker.scala:36:9] wire [3:0] auto_in_aw_bits_qos_0 = auto_in_aw_bits_qos; // @[UserYanker.scala:36:9] wire [3:0] auto_in_aw_bits_echo_tl_state_size_0 = auto_in_aw_bits_echo_tl_state_size; // @[UserYanker.scala:36:9] wire [7:0] auto_in_aw_bits_echo_tl_state_source_0 = auto_in_aw_bits_echo_tl_state_source; // @[UserYanker.scala:36:9] wire auto_in_aw_bits_echo_extra_id_0 = auto_in_aw_bits_echo_extra_id; // @[UserYanker.scala:36:9] wire auto_in_w_valid_0 = auto_in_w_valid; // @[UserYanker.scala:36:9] wire [63:0] auto_in_w_bits_data_0 = 
auto_in_w_bits_data; // @[UserYanker.scala:36:9] wire [7:0] auto_in_w_bits_strb_0 = auto_in_w_bits_strb; // @[UserYanker.scala:36:9] wire auto_in_w_bits_last_0 = auto_in_w_bits_last; // @[UserYanker.scala:36:9] wire auto_in_b_ready_0 = auto_in_b_ready; // @[UserYanker.scala:36:9] wire auto_in_ar_valid_0 = auto_in_ar_valid; // @[UserYanker.scala:36:9] wire [6:0] auto_in_ar_bits_id_0 = auto_in_ar_bits_id; // @[UserYanker.scala:36:9] wire [31:0] auto_in_ar_bits_addr_0 = auto_in_ar_bits_addr; // @[UserYanker.scala:36:9] wire [7:0] auto_in_ar_bits_len_0 = auto_in_ar_bits_len; // @[UserYanker.scala:36:9] wire [2:0] auto_in_ar_bits_size_0 = auto_in_ar_bits_size; // @[UserYanker.scala:36:9] wire [1:0] auto_in_ar_bits_burst_0 = auto_in_ar_bits_burst; // @[UserYanker.scala:36:9] wire auto_in_ar_bits_lock_0 = auto_in_ar_bits_lock; // @[UserYanker.scala:36:9] wire [3:0] auto_in_ar_bits_cache_0 = auto_in_ar_bits_cache; // @[UserYanker.scala:36:9] wire [2:0] auto_in_ar_bits_prot_0 = auto_in_ar_bits_prot; // @[UserYanker.scala:36:9] wire [3:0] auto_in_ar_bits_qos_0 = auto_in_ar_bits_qos; // @[UserYanker.scala:36:9] wire [3:0] auto_in_ar_bits_echo_tl_state_size_0 = auto_in_ar_bits_echo_tl_state_size; // @[UserYanker.scala:36:9] wire [7:0] auto_in_ar_bits_echo_tl_state_source_0 = auto_in_ar_bits_echo_tl_state_source; // @[UserYanker.scala:36:9] wire auto_in_ar_bits_echo_extra_id_0 = auto_in_ar_bits_echo_extra_id; // @[UserYanker.scala:36:9] wire auto_in_r_ready_0 = auto_in_r_ready; // @[UserYanker.scala:36:9] wire auto_out_aw_ready_0 = auto_out_aw_ready; // @[UserYanker.scala:36:9] wire auto_out_w_ready_0 = auto_out_w_ready; // @[UserYanker.scala:36:9] wire auto_out_b_valid_0 = auto_out_b_valid; // @[UserYanker.scala:36:9] wire [6:0] auto_out_b_bits_id_0 = auto_out_b_bits_id; // @[UserYanker.scala:36:9] wire [1:0] auto_out_b_bits_resp_0 = auto_out_b_bits_resp; // @[UserYanker.scala:36:9] wire auto_out_ar_ready_0 = auto_out_ar_ready; // @[UserYanker.scala:36:9] wire auto_out_r_valid_0 = auto_out_r_valid; // @[UserYanker.scala:36:9] wire [6:0] auto_out_r_bits_id_0 = auto_out_r_bits_id; // @[UserYanker.scala:36:9] wire [63:0] auto_out_r_bits_data_0 = auto_out_r_bits_data; // @[UserYanker.scala:36:9] wire [1:0] auto_out_r_bits_resp_0 = auto_out_r_bits_resp; // @[UserYanker.scala:36:9] wire auto_out_r_bits_last_0 = auto_out_r_bits_last; // @[UserYanker.scala:36:9] wire nodeIn_aw_ready; // @[MixedNode.scala:551:17] wire nodeIn_aw_valid = auto_in_aw_valid_0; // @[UserYanker.scala:36:9] wire [6:0] nodeIn_aw_bits_id = auto_in_aw_bits_id_0; // @[UserYanker.scala:36:9] wire [31:0] nodeIn_aw_bits_addr = auto_in_aw_bits_addr_0; // @[UserYanker.scala:36:9] wire [7:0] nodeIn_aw_bits_len = auto_in_aw_bits_len_0; // @[UserYanker.scala:36:9] wire [2:0] nodeIn_aw_bits_size = auto_in_aw_bits_size_0; // @[UserYanker.scala:36:9] wire [1:0] nodeIn_aw_bits_burst = auto_in_aw_bits_burst_0; // @[UserYanker.scala:36:9] wire nodeIn_aw_bits_lock = auto_in_aw_bits_lock_0; // @[UserYanker.scala:36:9] wire [3:0] nodeIn_aw_bits_cache = auto_in_aw_bits_cache_0; // @[UserYanker.scala:36:9] wire [2:0] nodeIn_aw_bits_prot = auto_in_aw_bits_prot_0; // @[UserYanker.scala:36:9] wire [3:0] nodeIn_aw_bits_qos = auto_in_aw_bits_qos_0; // @[UserYanker.scala:36:9] wire [3:0] nodeIn_aw_bits_echo_tl_state_size = auto_in_aw_bits_echo_tl_state_size_0; // @[UserYanker.scala:36:9] wire [7:0] nodeIn_aw_bits_echo_tl_state_source = auto_in_aw_bits_echo_tl_state_source_0; // @[UserYanker.scala:36:9] wire nodeIn_aw_bits_echo_extra_id = 
auto_in_aw_bits_echo_extra_id_0; // @[UserYanker.scala:36:9] wire nodeIn_w_ready; // @[MixedNode.scala:551:17] wire nodeIn_w_valid = auto_in_w_valid_0; // @[UserYanker.scala:36:9] wire [63:0] nodeIn_w_bits_data = auto_in_w_bits_data_0; // @[UserYanker.scala:36:9] wire [7:0] nodeIn_w_bits_strb = auto_in_w_bits_strb_0; // @[UserYanker.scala:36:9] wire nodeIn_w_bits_last = auto_in_w_bits_last_0; // @[UserYanker.scala:36:9] wire nodeIn_b_ready = auto_in_b_ready_0; // @[UserYanker.scala:36:9] wire nodeIn_b_valid; // @[MixedNode.scala:551:17] wire [6:0] nodeIn_b_bits_id; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_b_bits_resp; // @[MixedNode.scala:551:17] wire [3:0] nodeIn_b_bits_echo_tl_state_size; // @[MixedNode.scala:551:17] wire [7:0] nodeIn_b_bits_echo_tl_state_source; // @[MixedNode.scala:551:17] wire nodeIn_b_bits_echo_extra_id; // @[MixedNode.scala:551:17] wire nodeIn_ar_ready; // @[MixedNode.scala:551:17] wire nodeIn_ar_valid = auto_in_ar_valid_0; // @[UserYanker.scala:36:9] wire [6:0] nodeIn_ar_bits_id = auto_in_ar_bits_id_0; // @[UserYanker.scala:36:9] wire [31:0] nodeIn_ar_bits_addr = auto_in_ar_bits_addr_0; // @[UserYanker.scala:36:9] wire [7:0] nodeIn_ar_bits_len = auto_in_ar_bits_len_0; // @[UserYanker.scala:36:9] wire [2:0] nodeIn_ar_bits_size = auto_in_ar_bits_size_0; // @[UserYanker.scala:36:9] wire [1:0] nodeIn_ar_bits_burst = auto_in_ar_bits_burst_0; // @[UserYanker.scala:36:9] wire nodeIn_ar_bits_lock = auto_in_ar_bits_lock_0; // @[UserYanker.scala:36:9] wire [3:0] nodeIn_ar_bits_cache = auto_in_ar_bits_cache_0; // @[UserYanker.scala:36:9] wire [2:0] nodeIn_ar_bits_prot = auto_in_ar_bits_prot_0; // @[UserYanker.scala:36:9] wire [3:0] nodeIn_ar_bits_qos = auto_in_ar_bits_qos_0; // @[UserYanker.scala:36:9] wire [3:0] nodeIn_ar_bits_echo_tl_state_size = auto_in_ar_bits_echo_tl_state_size_0; // @[UserYanker.scala:36:9] wire [7:0] nodeIn_ar_bits_echo_tl_state_source = auto_in_ar_bits_echo_tl_state_source_0; // @[UserYanker.scala:36:9] wire nodeIn_ar_bits_echo_extra_id = auto_in_ar_bits_echo_extra_id_0; // @[UserYanker.scala:36:9] wire nodeIn_r_ready = auto_in_r_ready_0; // @[UserYanker.scala:36:9] wire nodeIn_r_valid; // @[MixedNode.scala:551:17] wire [6:0] nodeIn_r_bits_id; // @[MixedNode.scala:551:17] wire [63:0] nodeIn_r_bits_data; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_r_bits_resp; // @[MixedNode.scala:551:17] wire [3:0] nodeIn_r_bits_echo_tl_state_size; // @[MixedNode.scala:551:17] wire [7:0] nodeIn_r_bits_echo_tl_state_source; // @[MixedNode.scala:551:17] wire nodeIn_r_bits_echo_extra_id; // @[MixedNode.scala:551:17] wire nodeIn_r_bits_last; // @[MixedNode.scala:551:17] wire nodeOut_aw_ready = auto_out_aw_ready_0; // @[UserYanker.scala:36:9] wire nodeOut_aw_valid; // @[MixedNode.scala:542:17] wire [6:0] nodeOut_aw_bits_id; // @[MixedNode.scala:542:17] wire [31:0] nodeOut_aw_bits_addr; // @[MixedNode.scala:542:17] wire [7:0] nodeOut_aw_bits_len; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_aw_bits_size; // @[MixedNode.scala:542:17] wire [1:0] nodeOut_aw_bits_burst; // @[MixedNode.scala:542:17] wire nodeOut_aw_bits_lock; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_aw_bits_cache; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_aw_bits_prot; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_aw_bits_qos; // @[MixedNode.scala:542:17] wire nodeOut_w_ready = auto_out_w_ready_0; // @[UserYanker.scala:36:9] wire nodeOut_w_valid; // @[MixedNode.scala:542:17] wire [63:0] nodeOut_w_bits_data; // @[MixedNode.scala:542:17] wire [7:0] nodeOut_w_bits_strb; // 
@[MixedNode.scala:542:17] wire nodeOut_w_bits_last; // @[MixedNode.scala:542:17] wire nodeOut_b_ready; // @[MixedNode.scala:542:17] wire nodeOut_b_valid = auto_out_b_valid_0; // @[UserYanker.scala:36:9] wire [6:0] nodeOut_b_bits_id = auto_out_b_bits_id_0; // @[UserYanker.scala:36:9] wire [1:0] nodeOut_b_bits_resp = auto_out_b_bits_resp_0; // @[UserYanker.scala:36:9] wire nodeOut_ar_ready = auto_out_ar_ready_0; // @[UserYanker.scala:36:9] wire nodeOut_ar_valid; // @[MixedNode.scala:542:17] wire [6:0] nodeOut_ar_bits_id; // @[MixedNode.scala:542:17] wire [31:0] nodeOut_ar_bits_addr; // @[MixedNode.scala:542:17] wire [7:0] nodeOut_ar_bits_len; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_ar_bits_size; // @[MixedNode.scala:542:17] wire [1:0] nodeOut_ar_bits_burst; // @[MixedNode.scala:542:17] wire nodeOut_ar_bits_lock; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_ar_bits_cache; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_ar_bits_prot; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_ar_bits_qos; // @[MixedNode.scala:542:17] wire nodeOut_r_ready; // @[MixedNode.scala:542:17] wire nodeOut_r_valid = auto_out_r_valid_0; // @[UserYanker.scala:36:9] wire [6:0] nodeOut_r_bits_id = auto_out_r_bits_id_0; // @[UserYanker.scala:36:9] wire [63:0] nodeOut_r_bits_data = auto_out_r_bits_data_0; // @[UserYanker.scala:36:9] wire [1:0] nodeOut_r_bits_resp = auto_out_r_bits_resp_0; // @[UserYanker.scala:36:9] wire nodeOut_r_bits_last = auto_out_r_bits_last_0; // @[UserYanker.scala:36:9] wire auto_in_aw_ready_0; // @[UserYanker.scala:36:9] wire auto_in_w_ready_0; // @[UserYanker.scala:36:9] wire [3:0] auto_in_b_bits_echo_tl_state_size_0; // @[UserYanker.scala:36:9] wire [7:0] auto_in_b_bits_echo_tl_state_source_0; // @[UserYanker.scala:36:9] wire auto_in_b_bits_echo_extra_id_0; // @[UserYanker.scala:36:9] wire [6:0] auto_in_b_bits_id_0; // @[UserYanker.scala:36:9] wire [1:0] auto_in_b_bits_resp_0; // @[UserYanker.scala:36:9] wire auto_in_b_valid_0; // @[UserYanker.scala:36:9] wire auto_in_ar_ready_0; // @[UserYanker.scala:36:9] wire [3:0] auto_in_r_bits_echo_tl_state_size_0; // @[UserYanker.scala:36:9] wire [7:0] auto_in_r_bits_echo_tl_state_source_0; // @[UserYanker.scala:36:9] wire auto_in_r_bits_echo_extra_id_0; // @[UserYanker.scala:36:9] wire [6:0] auto_in_r_bits_id_0; // @[UserYanker.scala:36:9] wire [63:0] auto_in_r_bits_data_0; // @[UserYanker.scala:36:9] wire [1:0] auto_in_r_bits_resp_0; // @[UserYanker.scala:36:9] wire auto_in_r_bits_last_0; // @[UserYanker.scala:36:9] wire auto_in_r_valid_0; // @[UserYanker.scala:36:9] wire [6:0] auto_out_aw_bits_id_0; // @[UserYanker.scala:36:9] wire [31:0] auto_out_aw_bits_addr_0; // @[UserYanker.scala:36:9] wire [7:0] auto_out_aw_bits_len_0; // @[UserYanker.scala:36:9] wire [2:0] auto_out_aw_bits_size_0; // @[UserYanker.scala:36:9] wire [1:0] auto_out_aw_bits_burst_0; // @[UserYanker.scala:36:9] wire auto_out_aw_bits_lock_0; // @[UserYanker.scala:36:9] wire [3:0] auto_out_aw_bits_cache_0; // @[UserYanker.scala:36:9] wire [2:0] auto_out_aw_bits_prot_0; // @[UserYanker.scala:36:9] wire [3:0] auto_out_aw_bits_qos_0; // @[UserYanker.scala:36:9] wire auto_out_aw_valid_0; // @[UserYanker.scala:36:9] wire [63:0] auto_out_w_bits_data_0; // @[UserYanker.scala:36:9] wire [7:0] auto_out_w_bits_strb_0; // @[UserYanker.scala:36:9] wire auto_out_w_bits_last_0; // @[UserYanker.scala:36:9] wire auto_out_w_valid_0; // @[UserYanker.scala:36:9] wire auto_out_b_ready_0; // @[UserYanker.scala:36:9] wire [6:0] auto_out_ar_bits_id_0; // @[UserYanker.scala:36:9] wire [31:0] 
auto_out_ar_bits_addr_0; // @[UserYanker.scala:36:9] wire [7:0] auto_out_ar_bits_len_0; // @[UserYanker.scala:36:9] wire [2:0] auto_out_ar_bits_size_0; // @[UserYanker.scala:36:9] wire [1:0] auto_out_ar_bits_burst_0; // @[UserYanker.scala:36:9] wire auto_out_ar_bits_lock_0; // @[UserYanker.scala:36:9] wire [3:0] auto_out_ar_bits_cache_0; // @[UserYanker.scala:36:9] wire [2:0] auto_out_ar_bits_prot_0; // @[UserYanker.scala:36:9] wire [3:0] auto_out_ar_bits_qos_0; // @[UserYanker.scala:36:9] wire auto_out_ar_valid_0; // @[UserYanker.scala:36:9] wire auto_out_r_ready_0; // @[UserYanker.scala:36:9] wire _nodeIn_aw_ready_T; // @[UserYanker.scala:89:36] assign auto_in_aw_ready_0 = nodeIn_aw_ready; // @[UserYanker.scala:36:9] assign nodeOut_aw_bits_id = nodeIn_aw_bits_id; // @[MixedNode.scala:542:17, :551:17] wire [6:0] awsel_shiftAmount = nodeIn_aw_bits_id; // @[OneHot.scala:64:49] assign nodeOut_aw_bits_addr = nodeIn_aw_bits_addr; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_len = nodeIn_aw_bits_len; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_size = nodeIn_aw_bits_size; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_burst = nodeIn_aw_bits_burst; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_lock = nodeIn_aw_bits_lock; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_cache = nodeIn_aw_bits_cache; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_prot = nodeIn_aw_bits_prot; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_qos = nodeIn_aw_bits_qos; // @[MixedNode.scala:542:17, :551:17] assign auto_in_w_ready_0 = nodeIn_w_ready; // @[UserYanker.scala:36:9] assign nodeOut_w_valid = nodeIn_w_valid; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_w_bits_data = nodeIn_w_bits_data; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_w_bits_strb = nodeIn_w_bits_strb; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_w_bits_last = nodeIn_w_bits_last; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_b_ready = nodeIn_b_ready; // @[MixedNode.scala:542:17, :551:17] assign auto_in_b_valid_0 = nodeIn_b_valid; // @[UserYanker.scala:36:9] assign auto_in_b_bits_id_0 = nodeIn_b_bits_id; // @[UserYanker.scala:36:9] assign auto_in_b_bits_resp_0 = nodeIn_b_bits_resp; // @[UserYanker.scala:36:9] assign auto_in_b_bits_echo_tl_state_size_0 = nodeIn_b_bits_echo_tl_state_size; // @[UserYanker.scala:36:9] assign auto_in_b_bits_echo_tl_state_source_0 = nodeIn_b_bits_echo_tl_state_source; // @[UserYanker.scala:36:9] assign auto_in_b_bits_echo_extra_id_0 = nodeIn_b_bits_echo_extra_id; // @[UserYanker.scala:36:9] wire _nodeIn_ar_ready_T; // @[UserYanker.scala:60:36] assign auto_in_ar_ready_0 = nodeIn_ar_ready; // @[UserYanker.scala:36:9] assign nodeOut_ar_bits_id = nodeIn_ar_bits_id; // @[MixedNode.scala:542:17, :551:17] wire [6:0] arsel_shiftAmount = nodeIn_ar_bits_id; // @[OneHot.scala:64:49] assign nodeOut_ar_bits_addr = nodeIn_ar_bits_addr; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_len = nodeIn_ar_bits_len; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_size = nodeIn_ar_bits_size; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_burst = nodeIn_ar_bits_burst; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_lock = nodeIn_ar_bits_lock; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_cache = nodeIn_ar_bits_cache; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_prot = nodeIn_ar_bits_prot; // @[MixedNode.scala:542:17, :551:17] assign 
nodeOut_ar_bits_qos = nodeIn_ar_bits_qos; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_r_ready = nodeIn_r_ready; // @[MixedNode.scala:542:17, :551:17] assign auto_in_r_valid_0 = nodeIn_r_valid; // @[UserYanker.scala:36:9] assign auto_in_r_bits_id_0 = nodeIn_r_bits_id; // @[UserYanker.scala:36:9] assign auto_in_r_bits_data_0 = nodeIn_r_bits_data; // @[UserYanker.scala:36:9] assign auto_in_r_bits_resp_0 = nodeIn_r_bits_resp; // @[UserYanker.scala:36:9] assign auto_in_r_bits_echo_tl_state_size_0 = nodeIn_r_bits_echo_tl_state_size; // @[UserYanker.scala:36:9] assign auto_in_r_bits_echo_tl_state_source_0 = nodeIn_r_bits_echo_tl_state_source; // @[UserYanker.scala:36:9] assign auto_in_r_bits_echo_extra_id_0 = nodeIn_r_bits_echo_extra_id; // @[UserYanker.scala:36:9] assign auto_in_r_bits_last_0 = nodeIn_r_bits_last; // @[UserYanker.scala:36:9] wire _nodeOut_aw_valid_T; // @[UserYanker.scala:90:36] assign auto_out_aw_valid_0 = nodeOut_aw_valid; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_id_0 = nodeOut_aw_bits_id; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_addr_0 = nodeOut_aw_bits_addr; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_len_0 = nodeOut_aw_bits_len; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_size_0 = nodeOut_aw_bits_size; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_burst_0 = nodeOut_aw_bits_burst; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_lock_0 = nodeOut_aw_bits_lock; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_cache_0 = nodeOut_aw_bits_cache; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_prot_0 = nodeOut_aw_bits_prot; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_qos_0 = nodeOut_aw_bits_qos; // @[UserYanker.scala:36:9] assign nodeIn_w_ready = nodeOut_w_ready; // @[MixedNode.scala:542:17, :551:17] assign auto_out_w_valid_0 = nodeOut_w_valid; // @[UserYanker.scala:36:9] assign auto_out_w_bits_data_0 = nodeOut_w_bits_data; // @[UserYanker.scala:36:9] assign auto_out_w_bits_strb_0 = nodeOut_w_bits_strb; // @[UserYanker.scala:36:9] assign auto_out_w_bits_last_0 = nodeOut_w_bits_last; // @[UserYanker.scala:36:9] assign auto_out_b_ready_0 = nodeOut_b_ready; // @[UserYanker.scala:36:9] assign nodeIn_b_valid = nodeOut_b_valid; // @[MixedNode.scala:542:17, :551:17] assign nodeIn_b_bits_id = nodeOut_b_bits_id; // @[MixedNode.scala:542:17, :551:17] wire [6:0] bsel_shiftAmount = nodeOut_b_bits_id; // @[OneHot.scala:64:49] assign nodeIn_b_bits_resp = nodeOut_b_bits_resp; // @[MixedNode.scala:542:17, :551:17] wire _nodeOut_ar_valid_T; // @[UserYanker.scala:61:36] assign auto_out_ar_valid_0 = nodeOut_ar_valid; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_id_0 = nodeOut_ar_bits_id; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_addr_0 = nodeOut_ar_bits_addr; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_len_0 = nodeOut_ar_bits_len; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_size_0 = nodeOut_ar_bits_size; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_burst_0 = nodeOut_ar_bits_burst; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_lock_0 = nodeOut_ar_bits_lock; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_cache_0 = nodeOut_ar_bits_cache; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_prot_0 = nodeOut_ar_bits_prot; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_qos_0 = nodeOut_ar_bits_qos; // @[UserYanker.scala:36:9] assign auto_out_r_ready_0 = nodeOut_r_ready; // @[UserYanker.scala:36:9] assign nodeIn_r_valid = nodeOut_r_valid; // @[MixedNode.scala:542:17, :551:17] assign 
nodeIn_r_bits_id = nodeOut_r_bits_id; // @[MixedNode.scala:542:17, :551:17] wire [6:0] rsel_shiftAmount = nodeOut_r_bits_id; // @[OneHot.scala:64:49] assign nodeIn_r_bits_data = nodeOut_r_bits_data; // @[MixedNode.scala:542:17, :551:17] assign nodeIn_r_bits_resp = nodeOut_r_bits_resp; // @[MixedNode.scala:542:17, :551:17] assign nodeIn_r_bits_last = nodeOut_r_bits_last; // @[MixedNode.scala:542:17, :551:17] wire _ar_ready_WIRE_0; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_1; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_2; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_3; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_4; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_5; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_6; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_7; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_8; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_9; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_10; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_11; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_12; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_13; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_14; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_15; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_16; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_17; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_18; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_19; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_20; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_21; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_22; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_23; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_24; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_25; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_26; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_27; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_28; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_29; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_30; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_31; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_32; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_33; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_34; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_35; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_36; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_37; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_38; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_39; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_40; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_41; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_42; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_43; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_44; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_45; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_46; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_47; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_48; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_49; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_50; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_51; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_52; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_53; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_54; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_55; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_56; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_57; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_58; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_59; // 
@[UserYanker.scala:59:29] wire _ar_ready_WIRE_60; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_61; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_62; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_63; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_64; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_65; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_66; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_67; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_68; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_69; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_70; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_71; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_72; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_73; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_74; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_75; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_76; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_77; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_78; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_79; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_80; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_81; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_82; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_83; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_84; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_85; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_86; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_87; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_88; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_89; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_90; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_91; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_92; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_93; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_94; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_95; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_96; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_97; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_98; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_99; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_100; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_101; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_102; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_103; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_104; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_105; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_106; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_107; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_108; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_109; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_110; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_111; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_112; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_113; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_114; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_115; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_116; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_117; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_118; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_119; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_120; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_121; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_122; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_123; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_124; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_125; // @[UserYanker.scala:59:29] wire 
_ar_ready_WIRE_126; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_127; // @[UserYanker.scala:59:29] wire [127:0] _GEN = {{_ar_ready_WIRE_127}, {_ar_ready_WIRE_126}, {_ar_ready_WIRE_125}, {_ar_ready_WIRE_124}, {_ar_ready_WIRE_123}, {_ar_ready_WIRE_122}, {_ar_ready_WIRE_121}, {_ar_ready_WIRE_120}, {_ar_ready_WIRE_119}, {_ar_ready_WIRE_118}, {_ar_ready_WIRE_117}, {_ar_ready_WIRE_116}, {_ar_ready_WIRE_115}, {_ar_ready_WIRE_114}, {_ar_ready_WIRE_113}, {_ar_ready_WIRE_112}, {_ar_ready_WIRE_111}, {_ar_ready_WIRE_110}, {_ar_ready_WIRE_109}, {_ar_ready_WIRE_108}, {_ar_ready_WIRE_107}, {_ar_ready_WIRE_106}, {_ar_ready_WIRE_105}, {_ar_ready_WIRE_104}, {_ar_ready_WIRE_103}, {_ar_ready_WIRE_102}, {_ar_ready_WIRE_101}, {_ar_ready_WIRE_100}, {_ar_ready_WIRE_99}, {_ar_ready_WIRE_98}, {_ar_ready_WIRE_97}, {_ar_ready_WIRE_96}, {_ar_ready_WIRE_95}, {_ar_ready_WIRE_94}, {_ar_ready_WIRE_93}, {_ar_ready_WIRE_92}, {_ar_ready_WIRE_91}, {_ar_ready_WIRE_90}, {_ar_ready_WIRE_89}, {_ar_ready_WIRE_88}, {_ar_ready_WIRE_87}, {_ar_ready_WIRE_86}, {_ar_ready_WIRE_85}, {_ar_ready_WIRE_84}, {_ar_ready_WIRE_83}, {_ar_ready_WIRE_82}, {_ar_ready_WIRE_81}, {_ar_ready_WIRE_80}, {_ar_ready_WIRE_79}, {_ar_ready_WIRE_78}, {_ar_ready_WIRE_77}, {_ar_ready_WIRE_76}, {_ar_ready_WIRE_75}, {_ar_ready_WIRE_74}, {_ar_ready_WIRE_73}, {_ar_ready_WIRE_72}, {_ar_ready_WIRE_71}, {_ar_ready_WIRE_70}, {_ar_ready_WIRE_69}, {_ar_ready_WIRE_68}, {_ar_ready_WIRE_67}, {_ar_ready_WIRE_66}, {_ar_ready_WIRE_65}, {_ar_ready_WIRE_64}, {_ar_ready_WIRE_63}, {_ar_ready_WIRE_62}, {_ar_ready_WIRE_61}, {_ar_ready_WIRE_60}, {_ar_ready_WIRE_59}, {_ar_ready_WIRE_58}, {_ar_ready_WIRE_57}, {_ar_ready_WIRE_56}, {_ar_ready_WIRE_55}, {_ar_ready_WIRE_54}, {_ar_ready_WIRE_53}, {_ar_ready_WIRE_52}, {_ar_ready_WIRE_51}, {_ar_ready_WIRE_50}, {_ar_ready_WIRE_49}, {_ar_ready_WIRE_48}, {_ar_ready_WIRE_47}, {_ar_ready_WIRE_46}, {_ar_ready_WIRE_45}, {_ar_ready_WIRE_44}, {_ar_ready_WIRE_43}, {_ar_ready_WIRE_42}, {_ar_ready_WIRE_41}, {_ar_ready_WIRE_40}, {_ar_ready_WIRE_39}, {_ar_ready_WIRE_38}, {_ar_ready_WIRE_37}, {_ar_ready_WIRE_36}, {_ar_ready_WIRE_35}, {_ar_ready_WIRE_34}, {_ar_ready_WIRE_33}, {_ar_ready_WIRE_32}, {_ar_ready_WIRE_31}, {_ar_ready_WIRE_30}, {_ar_ready_WIRE_29}, {_ar_ready_WIRE_28}, {_ar_ready_WIRE_27}, {_ar_ready_WIRE_26}, {_ar_ready_WIRE_25}, {_ar_ready_WIRE_24}, {_ar_ready_WIRE_23}, {_ar_ready_WIRE_22}, {_ar_ready_WIRE_21}, {_ar_ready_WIRE_20}, {_ar_ready_WIRE_19}, {_ar_ready_WIRE_18}, {_ar_ready_WIRE_17}, {_ar_ready_WIRE_16}, {_ar_ready_WIRE_15}, {_ar_ready_WIRE_14}, {_ar_ready_WIRE_13}, {_ar_ready_WIRE_12}, {_ar_ready_WIRE_11}, {_ar_ready_WIRE_10}, {_ar_ready_WIRE_9}, {_ar_ready_WIRE_8}, {_ar_ready_WIRE_7}, {_ar_ready_WIRE_6}, {_ar_ready_WIRE_5}, {_ar_ready_WIRE_4}, {_ar_ready_WIRE_3}, {_ar_ready_WIRE_2}, {_ar_ready_WIRE_1}, {_ar_ready_WIRE_0}}; // @[UserYanker.scala:59:29, :60:36] assign _nodeIn_ar_ready_T = nodeOut_ar_ready & _GEN[nodeIn_ar_bits_id]; // @[UserYanker.scala:60:36] assign nodeIn_ar_ready = _nodeIn_ar_ready_T; // @[UserYanker.scala:60:36] assign _nodeOut_ar_valid_T = nodeIn_ar_valid & _GEN[nodeIn_ar_bits_id]; // @[UserYanker.scala:60:36, :61:36] assign nodeOut_ar_valid = _nodeOut_ar_valid_T; // @[UserYanker.scala:61:36] wire [3:0] _r_bits_WIRE_0_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_1_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_2_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_3_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_4_tl_state_size; // 
@[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_5_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_6_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_7_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_8_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_9_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_10_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_11_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_12_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_13_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_14_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_15_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_16_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_17_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_18_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_19_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_20_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_21_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_22_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_23_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_24_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_25_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_26_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_27_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_28_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_29_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_30_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_31_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_32_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_33_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_34_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_35_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_36_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_37_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_38_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_39_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_40_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_41_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_42_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_43_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_44_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_45_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_46_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_47_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_48_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_49_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_50_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_51_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_52_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_53_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_54_tl_state_size; // 
@[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_55_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_56_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_57_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_58_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_59_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_60_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_61_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_62_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_63_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_64_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_65_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_66_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_67_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_68_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_69_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_70_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_71_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_72_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_73_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_74_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_75_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_76_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_77_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_78_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_79_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_80_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_81_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_82_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_83_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_84_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_85_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_86_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_87_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_88_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_89_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_90_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_91_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_92_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_93_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_94_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_95_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_96_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_97_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_98_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_99_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_100_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_101_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_102_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_103_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_104_tl_state_size; // 
@[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_105_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_106_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_107_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_108_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_109_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_110_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_111_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_112_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_113_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_114_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_115_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_116_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_117_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_118_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_119_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_120_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_121_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_122_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_123_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_124_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_125_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_126_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_127_tl_state_size; // @[UserYanker.scala:68:27] wire [127:0][3:0] _GEN_0 = {{_r_bits_WIRE_127_tl_state_size}, {_r_bits_WIRE_126_tl_state_size}, {_r_bits_WIRE_125_tl_state_size}, {_r_bits_WIRE_124_tl_state_size}, {_r_bits_WIRE_123_tl_state_size}, {_r_bits_WIRE_122_tl_state_size}, {_r_bits_WIRE_121_tl_state_size}, {_r_bits_WIRE_120_tl_state_size}, {_r_bits_WIRE_119_tl_state_size}, {_r_bits_WIRE_118_tl_state_size}, {_r_bits_WIRE_117_tl_state_size}, {_r_bits_WIRE_116_tl_state_size}, {_r_bits_WIRE_115_tl_state_size}, {_r_bits_WIRE_114_tl_state_size}, {_r_bits_WIRE_113_tl_state_size}, {_r_bits_WIRE_112_tl_state_size}, {_r_bits_WIRE_111_tl_state_size}, {_r_bits_WIRE_110_tl_state_size}, {_r_bits_WIRE_109_tl_state_size}, {_r_bits_WIRE_108_tl_state_size}, {_r_bits_WIRE_107_tl_state_size}, {_r_bits_WIRE_106_tl_state_size}, {_r_bits_WIRE_105_tl_state_size}, {_r_bits_WIRE_104_tl_state_size}, {_r_bits_WIRE_103_tl_state_size}, {_r_bits_WIRE_102_tl_state_size}, {_r_bits_WIRE_101_tl_state_size}, {_r_bits_WIRE_100_tl_state_size}, {_r_bits_WIRE_99_tl_state_size}, {_r_bits_WIRE_98_tl_state_size}, {_r_bits_WIRE_97_tl_state_size}, {_r_bits_WIRE_96_tl_state_size}, {_r_bits_WIRE_95_tl_state_size}, {_r_bits_WIRE_94_tl_state_size}, {_r_bits_WIRE_93_tl_state_size}, {_r_bits_WIRE_92_tl_state_size}, {_r_bits_WIRE_91_tl_state_size}, {_r_bits_WIRE_90_tl_state_size}, {_r_bits_WIRE_89_tl_state_size}, {_r_bits_WIRE_88_tl_state_size}, {_r_bits_WIRE_87_tl_state_size}, {_r_bits_WIRE_86_tl_state_size}, {_r_bits_WIRE_85_tl_state_size}, {_r_bits_WIRE_84_tl_state_size}, {_r_bits_WIRE_83_tl_state_size}, {_r_bits_WIRE_82_tl_state_size}, {_r_bits_WIRE_81_tl_state_size}, {_r_bits_WIRE_80_tl_state_size}, {_r_bits_WIRE_79_tl_state_size}, {_r_bits_WIRE_78_tl_state_size}, {_r_bits_WIRE_77_tl_state_size}, {_r_bits_WIRE_76_tl_state_size}, {_r_bits_WIRE_75_tl_state_size}, {_r_bits_WIRE_74_tl_state_size}, {_r_bits_WIRE_73_tl_state_size}, 
{_r_bits_WIRE_72_tl_state_size}, {_r_bits_WIRE_71_tl_state_size}, {_r_bits_WIRE_70_tl_state_size}, {_r_bits_WIRE_69_tl_state_size}, {_r_bits_WIRE_68_tl_state_size}, {_r_bits_WIRE_67_tl_state_size}, {_r_bits_WIRE_66_tl_state_size}, {_r_bits_WIRE_65_tl_state_size}, {_r_bits_WIRE_64_tl_state_size}, {_r_bits_WIRE_63_tl_state_size}, {_r_bits_WIRE_62_tl_state_size}, {_r_bits_WIRE_61_tl_state_size}, {_r_bits_WIRE_60_tl_state_size}, {_r_bits_WIRE_59_tl_state_size}, {_r_bits_WIRE_58_tl_state_size}, {_r_bits_WIRE_57_tl_state_size}, {_r_bits_WIRE_56_tl_state_size}, {_r_bits_WIRE_55_tl_state_size}, {_r_bits_WIRE_54_tl_state_size}, {_r_bits_WIRE_53_tl_state_size}, {_r_bits_WIRE_52_tl_state_size}, {_r_bits_WIRE_51_tl_state_size}, {_r_bits_WIRE_50_tl_state_size}, {_r_bits_WIRE_49_tl_state_size}, {_r_bits_WIRE_48_tl_state_size}, {_r_bits_WIRE_47_tl_state_size}, {_r_bits_WIRE_46_tl_state_size}, {_r_bits_WIRE_45_tl_state_size}, {_r_bits_WIRE_44_tl_state_size}, {_r_bits_WIRE_43_tl_state_size}, {_r_bits_WIRE_42_tl_state_size}, {_r_bits_WIRE_41_tl_state_size}, {_r_bits_WIRE_40_tl_state_size}, {_r_bits_WIRE_39_tl_state_size}, {_r_bits_WIRE_38_tl_state_size}, {_r_bits_WIRE_37_tl_state_size}, {_r_bits_WIRE_36_tl_state_size}, {_r_bits_WIRE_35_tl_state_size}, {_r_bits_WIRE_34_tl_state_size}, {_r_bits_WIRE_33_tl_state_size}, {_r_bits_WIRE_32_tl_state_size}, {_r_bits_WIRE_31_tl_state_size}, {_r_bits_WIRE_30_tl_state_size}, {_r_bits_WIRE_29_tl_state_size}, {_r_bits_WIRE_28_tl_state_size}, {_r_bits_WIRE_27_tl_state_size}, {_r_bits_WIRE_26_tl_state_size}, {_r_bits_WIRE_25_tl_state_size}, {_r_bits_WIRE_24_tl_state_size}, {_r_bits_WIRE_23_tl_state_size}, {_r_bits_WIRE_22_tl_state_size}, {_r_bits_WIRE_21_tl_state_size}, {_r_bits_WIRE_20_tl_state_size}, {_r_bits_WIRE_19_tl_state_size}, {_r_bits_WIRE_18_tl_state_size}, {_r_bits_WIRE_17_tl_state_size}, {_r_bits_WIRE_16_tl_state_size}, {_r_bits_WIRE_15_tl_state_size}, {_r_bits_WIRE_14_tl_state_size}, {_r_bits_WIRE_13_tl_state_size}, {_r_bits_WIRE_12_tl_state_size}, {_r_bits_WIRE_11_tl_state_size}, {_r_bits_WIRE_10_tl_state_size}, {_r_bits_WIRE_9_tl_state_size}, {_r_bits_WIRE_8_tl_state_size}, {_r_bits_WIRE_7_tl_state_size}, {_r_bits_WIRE_6_tl_state_size}, {_r_bits_WIRE_5_tl_state_size}, {_r_bits_WIRE_4_tl_state_size}, {_r_bits_WIRE_3_tl_state_size}, {_r_bits_WIRE_2_tl_state_size}, {_r_bits_WIRE_1_tl_state_size}, {_r_bits_WIRE_0_tl_state_size}}; // @[UserYanker.scala:68:27, :73:22] assign nodeIn_r_bits_echo_tl_state_size = _GEN_0[nodeOut_r_bits_id]; // @[UserYanker.scala:73:22] wire [7:0] _r_bits_WIRE_0_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_1_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_2_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_3_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_4_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_5_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_6_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_7_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_8_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_9_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_10_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_11_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_12_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_13_tl_state_source; // @[UserYanker.scala:68:27] wire 
[7:0] _r_bits_WIRE_14_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_15_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_16_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_17_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_18_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_19_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_20_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_21_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_22_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_23_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_24_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_25_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_26_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_27_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_28_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_29_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_30_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_31_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_32_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_33_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_34_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_35_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_36_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_37_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_38_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_39_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_40_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_41_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_42_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_43_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_44_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_45_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_46_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_47_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_48_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_49_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_50_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_51_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_52_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_53_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_54_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_55_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_56_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_57_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_58_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_59_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_60_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_61_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_62_tl_state_source; // 
@[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_63_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_64_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_65_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_66_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_67_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_68_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_69_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_70_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_71_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_72_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_73_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_74_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_75_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_76_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_77_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_78_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_79_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_80_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_81_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_82_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_83_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_84_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_85_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_86_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_87_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_88_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_89_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_90_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_91_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_92_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_93_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_94_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_95_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_96_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_97_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_98_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_99_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_100_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_101_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_102_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_103_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_104_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_105_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_106_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_107_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_108_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_109_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_110_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] 
_r_bits_WIRE_111_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_112_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_113_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_114_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_115_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_116_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_117_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_118_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_119_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_120_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_121_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_122_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_123_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_124_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_125_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_126_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_127_tl_state_source; // @[UserYanker.scala:68:27] wire [127:0][7:0] _GEN_1 = {{_r_bits_WIRE_127_tl_state_source}, {_r_bits_WIRE_126_tl_state_source}, {_r_bits_WIRE_125_tl_state_source}, {_r_bits_WIRE_124_tl_state_source}, {_r_bits_WIRE_123_tl_state_source}, {_r_bits_WIRE_122_tl_state_source}, {_r_bits_WIRE_121_tl_state_source}, {_r_bits_WIRE_120_tl_state_source}, {_r_bits_WIRE_119_tl_state_source}, {_r_bits_WIRE_118_tl_state_source}, {_r_bits_WIRE_117_tl_state_source}, {_r_bits_WIRE_116_tl_state_source}, {_r_bits_WIRE_115_tl_state_source}, {_r_bits_WIRE_114_tl_state_source}, {_r_bits_WIRE_113_tl_state_source}, {_r_bits_WIRE_112_tl_state_source}, {_r_bits_WIRE_111_tl_state_source}, {_r_bits_WIRE_110_tl_state_source}, {_r_bits_WIRE_109_tl_state_source}, {_r_bits_WIRE_108_tl_state_source}, {_r_bits_WIRE_107_tl_state_source}, {_r_bits_WIRE_106_tl_state_source}, {_r_bits_WIRE_105_tl_state_source}, {_r_bits_WIRE_104_tl_state_source}, {_r_bits_WIRE_103_tl_state_source}, {_r_bits_WIRE_102_tl_state_source}, {_r_bits_WIRE_101_tl_state_source}, {_r_bits_WIRE_100_tl_state_source}, {_r_bits_WIRE_99_tl_state_source}, {_r_bits_WIRE_98_tl_state_source}, {_r_bits_WIRE_97_tl_state_source}, {_r_bits_WIRE_96_tl_state_source}, {_r_bits_WIRE_95_tl_state_source}, {_r_bits_WIRE_94_tl_state_source}, {_r_bits_WIRE_93_tl_state_source}, {_r_bits_WIRE_92_tl_state_source}, {_r_bits_WIRE_91_tl_state_source}, {_r_bits_WIRE_90_tl_state_source}, {_r_bits_WIRE_89_tl_state_source}, {_r_bits_WIRE_88_tl_state_source}, {_r_bits_WIRE_87_tl_state_source}, {_r_bits_WIRE_86_tl_state_source}, {_r_bits_WIRE_85_tl_state_source}, {_r_bits_WIRE_84_tl_state_source}, {_r_bits_WIRE_83_tl_state_source}, {_r_bits_WIRE_82_tl_state_source}, {_r_bits_WIRE_81_tl_state_source}, {_r_bits_WIRE_80_tl_state_source}, {_r_bits_WIRE_79_tl_state_source}, {_r_bits_WIRE_78_tl_state_source}, {_r_bits_WIRE_77_tl_state_source}, {_r_bits_WIRE_76_tl_state_source}, {_r_bits_WIRE_75_tl_state_source}, {_r_bits_WIRE_74_tl_state_source}, {_r_bits_WIRE_73_tl_state_source}, {_r_bits_WIRE_72_tl_state_source}, {_r_bits_WIRE_71_tl_state_source}, {_r_bits_WIRE_70_tl_state_source}, {_r_bits_WIRE_69_tl_state_source}, {_r_bits_WIRE_68_tl_state_source}, {_r_bits_WIRE_67_tl_state_source}, {_r_bits_WIRE_66_tl_state_source}, {_r_bits_WIRE_65_tl_state_source}, {_r_bits_WIRE_64_tl_state_source}, 
{_r_bits_WIRE_63_tl_state_source}, {_r_bits_WIRE_62_tl_state_source}, {_r_bits_WIRE_61_tl_state_source}, {_r_bits_WIRE_60_tl_state_source}, {_r_bits_WIRE_59_tl_state_source}, {_r_bits_WIRE_58_tl_state_source}, {_r_bits_WIRE_57_tl_state_source}, {_r_bits_WIRE_56_tl_state_source}, {_r_bits_WIRE_55_tl_state_source}, {_r_bits_WIRE_54_tl_state_source}, {_r_bits_WIRE_53_tl_state_source}, {_r_bits_WIRE_52_tl_state_source}, {_r_bits_WIRE_51_tl_state_source}, {_r_bits_WIRE_50_tl_state_source}, {_r_bits_WIRE_49_tl_state_source}, {_r_bits_WIRE_48_tl_state_source}, {_r_bits_WIRE_47_tl_state_source}, {_r_bits_WIRE_46_tl_state_source}, {_r_bits_WIRE_45_tl_state_source}, {_r_bits_WIRE_44_tl_state_source}, {_r_bits_WIRE_43_tl_state_source}, {_r_bits_WIRE_42_tl_state_source}, {_r_bits_WIRE_41_tl_state_source}, {_r_bits_WIRE_40_tl_state_source}, {_r_bits_WIRE_39_tl_state_source}, {_r_bits_WIRE_38_tl_state_source}, {_r_bits_WIRE_37_tl_state_source}, {_r_bits_WIRE_36_tl_state_source}, {_r_bits_WIRE_35_tl_state_source}, {_r_bits_WIRE_34_tl_state_source}, {_r_bits_WIRE_33_tl_state_source}, {_r_bits_WIRE_32_tl_state_source}, {_r_bits_WIRE_31_tl_state_source}, {_r_bits_WIRE_30_tl_state_source}, {_r_bits_WIRE_29_tl_state_source}, {_r_bits_WIRE_28_tl_state_source}, {_r_bits_WIRE_27_tl_state_source}, {_r_bits_WIRE_26_tl_state_source}, {_r_bits_WIRE_25_tl_state_source}, {_r_bits_WIRE_24_tl_state_source}, {_r_bits_WIRE_23_tl_state_source}, {_r_bits_WIRE_22_tl_state_source}, {_r_bits_WIRE_21_tl_state_source}, {_r_bits_WIRE_20_tl_state_source}, {_r_bits_WIRE_19_tl_state_source}, {_r_bits_WIRE_18_tl_state_source}, {_r_bits_WIRE_17_tl_state_source}, {_r_bits_WIRE_16_tl_state_source}, {_r_bits_WIRE_15_tl_state_source}, {_r_bits_WIRE_14_tl_state_source}, {_r_bits_WIRE_13_tl_state_source}, {_r_bits_WIRE_12_tl_state_source}, {_r_bits_WIRE_11_tl_state_source}, {_r_bits_WIRE_10_tl_state_source}, {_r_bits_WIRE_9_tl_state_source}, {_r_bits_WIRE_8_tl_state_source}, {_r_bits_WIRE_7_tl_state_source}, {_r_bits_WIRE_6_tl_state_source}, {_r_bits_WIRE_5_tl_state_source}, {_r_bits_WIRE_4_tl_state_source}, {_r_bits_WIRE_3_tl_state_source}, {_r_bits_WIRE_2_tl_state_source}, {_r_bits_WIRE_1_tl_state_source}, {_r_bits_WIRE_0_tl_state_source}}; // @[UserYanker.scala:68:27, :73:22] assign nodeIn_r_bits_echo_tl_state_source = _GEN_1[nodeOut_r_bits_id]; // @[UserYanker.scala:73:22] wire _r_bits_WIRE_0_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_1_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_2_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_3_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_4_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_5_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_6_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_7_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_8_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_9_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_10_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_11_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_12_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_13_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_14_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_15_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_16_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_17_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_18_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_19_extra_id; // 
@[UserYanker.scala:68:27] wire _r_bits_WIRE_20_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_21_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_22_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_23_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_24_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_25_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_26_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_27_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_28_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_29_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_30_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_31_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_32_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_33_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_34_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_35_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_36_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_37_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_38_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_39_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_40_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_41_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_42_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_43_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_44_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_45_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_46_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_47_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_48_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_49_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_50_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_51_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_52_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_53_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_54_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_55_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_56_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_57_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_58_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_59_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_60_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_61_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_62_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_63_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_64_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_65_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_66_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_67_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_68_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_69_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_70_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_71_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_72_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_73_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_74_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_75_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_76_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_77_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_78_extra_id; // 
@[UserYanker.scala:68:27] wire _r_bits_WIRE_79_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_80_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_81_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_82_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_83_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_84_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_85_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_86_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_87_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_88_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_89_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_90_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_91_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_92_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_93_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_94_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_95_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_96_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_97_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_98_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_99_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_100_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_101_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_102_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_103_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_104_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_105_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_106_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_107_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_108_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_109_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_110_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_111_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_112_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_113_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_114_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_115_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_116_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_117_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_118_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_119_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_120_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_121_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_122_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_123_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_124_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_125_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_126_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_127_extra_id; // @[UserYanker.scala:68:27] wire [127:0] _GEN_2 = {{_r_bits_WIRE_127_extra_id}, {_r_bits_WIRE_126_extra_id}, {_r_bits_WIRE_125_extra_id}, {_r_bits_WIRE_124_extra_id}, {_r_bits_WIRE_123_extra_id}, {_r_bits_WIRE_122_extra_id}, {_r_bits_WIRE_121_extra_id}, {_r_bits_WIRE_120_extra_id}, {_r_bits_WIRE_119_extra_id}, {_r_bits_WIRE_118_extra_id}, {_r_bits_WIRE_117_extra_id}, {_r_bits_WIRE_116_extra_id}, {_r_bits_WIRE_115_extra_id}, {_r_bits_WIRE_114_extra_id}, {_r_bits_WIRE_113_extra_id}, {_r_bits_WIRE_112_extra_id}, {_r_bits_WIRE_111_extra_id}, {_r_bits_WIRE_110_extra_id}, 
{_r_bits_WIRE_109_extra_id}, {_r_bits_WIRE_108_extra_id}, {_r_bits_WIRE_107_extra_id}, {_r_bits_WIRE_106_extra_id}, {_r_bits_WIRE_105_extra_id}, {_r_bits_WIRE_104_extra_id}, {_r_bits_WIRE_103_extra_id}, {_r_bits_WIRE_102_extra_id}, {_r_bits_WIRE_101_extra_id}, {_r_bits_WIRE_100_extra_id}, {_r_bits_WIRE_99_extra_id}, {_r_bits_WIRE_98_extra_id}, {_r_bits_WIRE_97_extra_id}, {_r_bits_WIRE_96_extra_id}, {_r_bits_WIRE_95_extra_id}, {_r_bits_WIRE_94_extra_id}, {_r_bits_WIRE_93_extra_id}, {_r_bits_WIRE_92_extra_id}, {_r_bits_WIRE_91_extra_id}, {_r_bits_WIRE_90_extra_id}, {_r_bits_WIRE_89_extra_id}, {_r_bits_WIRE_88_extra_id}, {_r_bits_WIRE_87_extra_id}, {_r_bits_WIRE_86_extra_id}, {_r_bits_WIRE_85_extra_id}, {_r_bits_WIRE_84_extra_id}, {_r_bits_WIRE_83_extra_id}, {_r_bits_WIRE_82_extra_id}, {_r_bits_WIRE_81_extra_id}, {_r_bits_WIRE_80_extra_id}, {_r_bits_WIRE_79_extra_id}, {_r_bits_WIRE_78_extra_id}, {_r_bits_WIRE_77_extra_id}, {_r_bits_WIRE_76_extra_id}, {_r_bits_WIRE_75_extra_id}, {_r_bits_WIRE_74_extra_id}, {_r_bits_WIRE_73_extra_id}, {_r_bits_WIRE_72_extra_id}, {_r_bits_WIRE_71_extra_id}, {_r_bits_WIRE_70_extra_id}, {_r_bits_WIRE_69_extra_id}, {_r_bits_WIRE_68_extra_id}, {_r_bits_WIRE_67_extra_id}, {_r_bits_WIRE_66_extra_id}, {_r_bits_WIRE_65_extra_id}, {_r_bits_WIRE_64_extra_id}, {_r_bits_WIRE_63_extra_id}, {_r_bits_WIRE_62_extra_id}, {_r_bits_WIRE_61_extra_id}, {_r_bits_WIRE_60_extra_id}, {_r_bits_WIRE_59_extra_id}, {_r_bits_WIRE_58_extra_id}, {_r_bits_WIRE_57_extra_id}, {_r_bits_WIRE_56_extra_id}, {_r_bits_WIRE_55_extra_id}, {_r_bits_WIRE_54_extra_id}, {_r_bits_WIRE_53_extra_id}, {_r_bits_WIRE_52_extra_id}, {_r_bits_WIRE_51_extra_id}, {_r_bits_WIRE_50_extra_id}, {_r_bits_WIRE_49_extra_id}, {_r_bits_WIRE_48_extra_id}, {_r_bits_WIRE_47_extra_id}, {_r_bits_WIRE_46_extra_id}, {_r_bits_WIRE_45_extra_id}, {_r_bits_WIRE_44_extra_id}, {_r_bits_WIRE_43_extra_id}, {_r_bits_WIRE_42_extra_id}, {_r_bits_WIRE_41_extra_id}, {_r_bits_WIRE_40_extra_id}, {_r_bits_WIRE_39_extra_id}, {_r_bits_WIRE_38_extra_id}, {_r_bits_WIRE_37_extra_id}, {_r_bits_WIRE_36_extra_id}, {_r_bits_WIRE_35_extra_id}, {_r_bits_WIRE_34_extra_id}, {_r_bits_WIRE_33_extra_id}, {_r_bits_WIRE_32_extra_id}, {_r_bits_WIRE_31_extra_id}, {_r_bits_WIRE_30_extra_id}, {_r_bits_WIRE_29_extra_id}, {_r_bits_WIRE_28_extra_id}, {_r_bits_WIRE_27_extra_id}, {_r_bits_WIRE_26_extra_id}, {_r_bits_WIRE_25_extra_id}, {_r_bits_WIRE_24_extra_id}, {_r_bits_WIRE_23_extra_id}, {_r_bits_WIRE_22_extra_id}, {_r_bits_WIRE_21_extra_id}, {_r_bits_WIRE_20_extra_id}, {_r_bits_WIRE_19_extra_id}, {_r_bits_WIRE_18_extra_id}, {_r_bits_WIRE_17_extra_id}, {_r_bits_WIRE_16_extra_id}, {_r_bits_WIRE_15_extra_id}, {_r_bits_WIRE_14_extra_id}, {_r_bits_WIRE_13_extra_id}, {_r_bits_WIRE_12_extra_id}, {_r_bits_WIRE_11_extra_id}, {_r_bits_WIRE_10_extra_id}, {_r_bits_WIRE_9_extra_id}, {_r_bits_WIRE_8_extra_id}, {_r_bits_WIRE_7_extra_id}, {_r_bits_WIRE_6_extra_id}, {_r_bits_WIRE_5_extra_id}, {_r_bits_WIRE_4_extra_id}, {_r_bits_WIRE_3_extra_id}, {_r_bits_WIRE_2_extra_id}, {_r_bits_WIRE_1_extra_id}, {_r_bits_WIRE_0_extra_id}}; // @[UserYanker.scala:68:27, :73:22] assign nodeIn_r_bits_echo_extra_id = _GEN_2[nodeOut_r_bits_id]; // @[UserYanker.scala:73:22] wire [127:0] _arsel_T = 128'h1 << arsel_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [127:0] _arsel_T_1 = _arsel_T; // @[OneHot.scala:65:{12,27}] wire arsel_0 = _arsel_T_1[0]; // @[OneHot.scala:65:27] wire arsel_1 = _arsel_T_1[1]; // @[OneHot.scala:65:27] wire arsel_2 = _arsel_T_1[2]; // @[OneHot.scala:65:27] wire arsel_3 = 
_arsel_T_1[3]; // @[OneHot.scala:65:27] wire arsel_4 = _arsel_T_1[4]; // @[OneHot.scala:65:27] wire arsel_5 = _arsel_T_1[5]; // @[OneHot.scala:65:27] wire arsel_6 = _arsel_T_1[6]; // @[OneHot.scala:65:27] wire arsel_7 = _arsel_T_1[7]; // @[OneHot.scala:65:27] wire arsel_8 = _arsel_T_1[8]; // @[OneHot.scala:65:27] wire arsel_9 = _arsel_T_1[9]; // @[OneHot.scala:65:27] wire arsel_10 = _arsel_T_1[10]; // @[OneHot.scala:65:27] wire arsel_11 = _arsel_T_1[11]; // @[OneHot.scala:65:27] wire arsel_12 = _arsel_T_1[12]; // @[OneHot.scala:65:27] wire arsel_13 = _arsel_T_1[13]; // @[OneHot.scala:65:27] wire arsel_14 = _arsel_T_1[14]; // @[OneHot.scala:65:27] wire arsel_15 = _arsel_T_1[15]; // @[OneHot.scala:65:27] wire arsel_16 = _arsel_T_1[16]; // @[OneHot.scala:65:27] wire arsel_17 = _arsel_T_1[17]; // @[OneHot.scala:65:27] wire arsel_18 = _arsel_T_1[18]; // @[OneHot.scala:65:27] wire arsel_19 = _arsel_T_1[19]; // @[OneHot.scala:65:27] wire arsel_20 = _arsel_T_1[20]; // @[OneHot.scala:65:27] wire arsel_21 = _arsel_T_1[21]; // @[OneHot.scala:65:27] wire arsel_22 = _arsel_T_1[22]; // @[OneHot.scala:65:27] wire arsel_23 = _arsel_T_1[23]; // @[OneHot.scala:65:27] wire arsel_24 = _arsel_T_1[24]; // @[OneHot.scala:65:27] wire arsel_25 = _arsel_T_1[25]; // @[OneHot.scala:65:27] wire arsel_26 = _arsel_T_1[26]; // @[OneHot.scala:65:27] wire arsel_27 = _arsel_T_1[27]; // @[OneHot.scala:65:27] wire arsel_28 = _arsel_T_1[28]; // @[OneHot.scala:65:27] wire arsel_29 = _arsel_T_1[29]; // @[OneHot.scala:65:27] wire arsel_30 = _arsel_T_1[30]; // @[OneHot.scala:65:27] wire arsel_31 = _arsel_T_1[31]; // @[OneHot.scala:65:27] wire arsel_32 = _arsel_T_1[32]; // @[OneHot.scala:65:27] wire arsel_33 = _arsel_T_1[33]; // @[OneHot.scala:65:27] wire arsel_34 = _arsel_T_1[34]; // @[OneHot.scala:65:27] wire arsel_35 = _arsel_T_1[35]; // @[OneHot.scala:65:27] wire arsel_36 = _arsel_T_1[36]; // @[OneHot.scala:65:27] wire arsel_37 = _arsel_T_1[37]; // @[OneHot.scala:65:27] wire arsel_38 = _arsel_T_1[38]; // @[OneHot.scala:65:27] wire arsel_39 = _arsel_T_1[39]; // @[OneHot.scala:65:27] wire arsel_40 = _arsel_T_1[40]; // @[OneHot.scala:65:27] wire arsel_41 = _arsel_T_1[41]; // @[OneHot.scala:65:27] wire arsel_42 = _arsel_T_1[42]; // @[OneHot.scala:65:27] wire arsel_43 = _arsel_T_1[43]; // @[OneHot.scala:65:27] wire arsel_44 = _arsel_T_1[44]; // @[OneHot.scala:65:27] wire arsel_45 = _arsel_T_1[45]; // @[OneHot.scala:65:27] wire arsel_46 = _arsel_T_1[46]; // @[OneHot.scala:65:27] wire arsel_47 = _arsel_T_1[47]; // @[OneHot.scala:65:27] wire arsel_48 = _arsel_T_1[48]; // @[OneHot.scala:65:27] wire arsel_49 = _arsel_T_1[49]; // @[OneHot.scala:65:27] wire arsel_50 = _arsel_T_1[50]; // @[OneHot.scala:65:27] wire arsel_51 = _arsel_T_1[51]; // @[OneHot.scala:65:27] wire arsel_52 = _arsel_T_1[52]; // @[OneHot.scala:65:27] wire arsel_53 = _arsel_T_1[53]; // @[OneHot.scala:65:27] wire arsel_54 = _arsel_T_1[54]; // @[OneHot.scala:65:27] wire arsel_55 = _arsel_T_1[55]; // @[OneHot.scala:65:27] wire arsel_56 = _arsel_T_1[56]; // @[OneHot.scala:65:27] wire arsel_57 = _arsel_T_1[57]; // @[OneHot.scala:65:27] wire arsel_58 = _arsel_T_1[58]; // @[OneHot.scala:65:27] wire arsel_59 = _arsel_T_1[59]; // @[OneHot.scala:65:27] wire arsel_60 = _arsel_T_1[60]; // @[OneHot.scala:65:27] wire arsel_61 = _arsel_T_1[61]; // @[OneHot.scala:65:27] wire arsel_62 = _arsel_T_1[62]; // @[OneHot.scala:65:27] wire arsel_63 = _arsel_T_1[63]; // @[OneHot.scala:65:27] wire arsel_64 = _arsel_T_1[64]; // @[OneHot.scala:65:27] wire arsel_65 = _arsel_T_1[65]; // 
@[OneHot.scala:65:27] wire arsel_66 = _arsel_T_1[66]; // @[OneHot.scala:65:27] wire arsel_67 = _arsel_T_1[67]; // @[OneHot.scala:65:27] wire arsel_68 = _arsel_T_1[68]; // @[OneHot.scala:65:27] wire arsel_69 = _arsel_T_1[69]; // @[OneHot.scala:65:27] wire arsel_70 = _arsel_T_1[70]; // @[OneHot.scala:65:27] wire arsel_71 = _arsel_T_1[71]; // @[OneHot.scala:65:27] wire arsel_72 = _arsel_T_1[72]; // @[OneHot.scala:65:27] wire arsel_73 = _arsel_T_1[73]; // @[OneHot.scala:65:27] wire arsel_74 = _arsel_T_1[74]; // @[OneHot.scala:65:27] wire arsel_75 = _arsel_T_1[75]; // @[OneHot.scala:65:27] wire arsel_76 = _arsel_T_1[76]; // @[OneHot.scala:65:27] wire arsel_77 = _arsel_T_1[77]; // @[OneHot.scala:65:27] wire arsel_78 = _arsel_T_1[78]; // @[OneHot.scala:65:27] wire arsel_79 = _arsel_T_1[79]; // @[OneHot.scala:65:27] wire arsel_80 = _arsel_T_1[80]; // @[OneHot.scala:65:27] wire arsel_81 = _arsel_T_1[81]; // @[OneHot.scala:65:27] wire arsel_82 = _arsel_T_1[82]; // @[OneHot.scala:65:27] wire arsel_83 = _arsel_T_1[83]; // @[OneHot.scala:65:27] wire arsel_84 = _arsel_T_1[84]; // @[OneHot.scala:65:27] wire arsel_85 = _arsel_T_1[85]; // @[OneHot.scala:65:27] wire arsel_86 = _arsel_T_1[86]; // @[OneHot.scala:65:27] wire arsel_87 = _arsel_T_1[87]; // @[OneHot.scala:65:27] wire arsel_88 = _arsel_T_1[88]; // @[OneHot.scala:65:27] wire arsel_89 = _arsel_T_1[89]; // @[OneHot.scala:65:27] wire arsel_90 = _arsel_T_1[90]; // @[OneHot.scala:65:27] wire arsel_91 = _arsel_T_1[91]; // @[OneHot.scala:65:27] wire arsel_92 = _arsel_T_1[92]; // @[OneHot.scala:65:27] wire arsel_93 = _arsel_T_1[93]; // @[OneHot.scala:65:27] wire arsel_94 = _arsel_T_1[94]; // @[OneHot.scala:65:27] wire arsel_95 = _arsel_T_1[95]; // @[OneHot.scala:65:27] wire arsel_96 = _arsel_T_1[96]; // @[OneHot.scala:65:27] wire arsel_97 = _arsel_T_1[97]; // @[OneHot.scala:65:27] wire arsel_98 = _arsel_T_1[98]; // @[OneHot.scala:65:27] wire arsel_99 = _arsel_T_1[99]; // @[OneHot.scala:65:27] wire arsel_100 = _arsel_T_1[100]; // @[OneHot.scala:65:27] wire arsel_101 = _arsel_T_1[101]; // @[OneHot.scala:65:27] wire arsel_102 = _arsel_T_1[102]; // @[OneHot.scala:65:27] wire arsel_103 = _arsel_T_1[103]; // @[OneHot.scala:65:27] wire arsel_104 = _arsel_T_1[104]; // @[OneHot.scala:65:27] wire arsel_105 = _arsel_T_1[105]; // @[OneHot.scala:65:27] wire arsel_106 = _arsel_T_1[106]; // @[OneHot.scala:65:27] wire arsel_107 = _arsel_T_1[107]; // @[OneHot.scala:65:27] wire arsel_108 = _arsel_T_1[108]; // @[OneHot.scala:65:27] wire arsel_109 = _arsel_T_1[109]; // @[OneHot.scala:65:27] wire arsel_110 = _arsel_T_1[110]; // @[OneHot.scala:65:27] wire arsel_111 = _arsel_T_1[111]; // @[OneHot.scala:65:27] wire arsel_112 = _arsel_T_1[112]; // @[OneHot.scala:65:27] wire arsel_113 = _arsel_T_1[113]; // @[OneHot.scala:65:27] wire arsel_114 = _arsel_T_1[114]; // @[OneHot.scala:65:27] wire arsel_115 = _arsel_T_1[115]; // @[OneHot.scala:65:27] wire arsel_116 = _arsel_T_1[116]; // @[OneHot.scala:65:27] wire arsel_117 = _arsel_T_1[117]; // @[OneHot.scala:65:27] wire arsel_118 = _arsel_T_1[118]; // @[OneHot.scala:65:27] wire arsel_119 = _arsel_T_1[119]; // @[OneHot.scala:65:27] wire arsel_120 = _arsel_T_1[120]; // @[OneHot.scala:65:27] wire arsel_121 = _arsel_T_1[121]; // @[OneHot.scala:65:27] wire arsel_122 = _arsel_T_1[122]; // @[OneHot.scala:65:27] wire arsel_123 = _arsel_T_1[123]; // @[OneHot.scala:65:27] wire arsel_124 = _arsel_T_1[124]; // @[OneHot.scala:65:27] wire arsel_125 = _arsel_T_1[125]; // @[OneHot.scala:65:27] wire arsel_126 = _arsel_T_1[126]; // @[OneHot.scala:65:27] 
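// The arsel_* / rsel_* bits are a one-hot decode of the AR request ID and the R response ID
// (1 << id, per OneHot.scala:65). Together with the _GEN / _GEN_0 / _GEN_1 / _GEN_2 mux tables
// indexed by nodeIn_ar_bits_id and nodeOut_r_bits_id above, they appear to steer the per-ID
// enqueue/dequeue handshakes and the recovery of the echoed TileLink state (tl_state_size,
// tl_state_source, extra_id) from the per-ID queues instantiated later in this UserYanker module.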
  wire         arsel_127 = _arsel_T_1[127]; // @[OneHot.scala:65:27]
  wire [127:0] _rsel_T = 128'h1 << rsel_shiftAmount; // @[OneHot.scala:64:49, :65:12]
  wire [127:0] _rsel_T_1 = _rsel_T; // @[OneHot.scala:65:{12,27}]
  wire         rsel_0 = _rsel_T_1[0]; // @[OneHot.scala:65:27]
  wire         rsel_1 = _rsel_T_1[1]; // @[OneHot.scala:65:27]
  wire         rsel_2 = _rsel_T_1[2]; // @[OneHot.scala:65:27]
  wire         rsel_3 = _rsel_T_1[3]; // @[OneHot.scala:65:27]
  wire         rsel_4 = _rsel_T_1[4]; // @[OneHot.scala:65:27]
  wire         rsel_5 = _rsel_T_1[5]; // @[OneHot.scala:65:27]
  wire         rsel_6 = _rsel_T_1[6]; // @[OneHot.scala:65:27]
  wire         rsel_7 = _rsel_T_1[7]; // @[OneHot.scala:65:27]
  wire         rsel_8 = _rsel_T_1[8]; // @[OneHot.scala:65:27]
  wire         rsel_9 = _rsel_T_1[9]; // @[OneHot.scala:65:27]
  wire         rsel_10 = _rsel_T_1[10]; // @[OneHot.scala:65:27]
  wire         rsel_11 = _rsel_T_1[11]; // @[OneHot.scala:65:27]
  wire         rsel_12 = _rsel_T_1[12]; // @[OneHot.scala:65:27]
  wire         rsel_13 = _rsel_T_1[13]; // @[OneHot.scala:65:27]
  wire         rsel_14 = _rsel_T_1[14]; // @[OneHot.scala:65:27]
  wire         rsel_15 = _rsel_T_1[15]; // @[OneHot.scala:65:27]
  wire         rsel_16 = _rsel_T_1[16]; // @[OneHot.scala:65:27]
  wire         rsel_17 = _rsel_T_1[17]; // @[OneHot.scala:65:27]
  wire         rsel_18 = _rsel_T_1[18]; // @[OneHot.scala:65:27]
  wire         rsel_19 = _rsel_T_1[19]; // @[OneHot.scala:65:27]
  wire         rsel_20 = _rsel_T_1[20]; // @[OneHot.scala:65:27]
  wire         rsel_21 = _rsel_T_1[21]; // @[OneHot.scala:65:27]
  wire         rsel_22 = _rsel_T_1[22]; // @[OneHot.scala:65:27]
  wire         rsel_23 = _rsel_T_1[23]; // @[OneHot.scala:65:27]
  wire         rsel_24 = _rsel_T_1[24]; // @[OneHot.scala:65:27]
  wire         rsel_25 = _rsel_T_1[25]; // @[OneHot.scala:65:27]
  wire         rsel_26 = _rsel_T_1[26]; // @[OneHot.scala:65:27]
  wire         rsel_27 = _rsel_T_1[27]; // @[OneHot.scala:65:27]
  wire         rsel_28 = _rsel_T_1[28]; // @[OneHot.scala:65:27]
  wire         rsel_29 = _rsel_T_1[29]; // @[OneHot.scala:65:27]
  wire         rsel_30 = _rsel_T_1[30]; // @[OneHot.scala:65:27]
  wire         rsel_31 = _rsel_T_1[31]; // @[OneHot.scala:65:27]
  wire         rsel_32 = _rsel_T_1[32]; // @[OneHot.scala:65:27]
  wire         rsel_33 = _rsel_T_1[33]; // @[OneHot.scala:65:27]
  wire         rsel_34 = _rsel_T_1[34]; // @[OneHot.scala:65:27]
  wire         rsel_35 = _rsel_T_1[35]; // @[OneHot.scala:65:27]
  wire         rsel_36 = _rsel_T_1[36]; // @[OneHot.scala:65:27]
  wire         rsel_37 = _rsel_T_1[37]; // @[OneHot.scala:65:27]
  wire         rsel_38 = _rsel_T_1[38]; // @[OneHot.scala:65:27]
  wire         rsel_39 = _rsel_T_1[39]; // @[OneHot.scala:65:27]
  wire         rsel_40 = _rsel_T_1[40]; // @[OneHot.scala:65:27]
  wire         rsel_41 = _rsel_T_1[41]; // @[OneHot.scala:65:27]
  wire         rsel_42 = _rsel_T_1[42]; // @[OneHot.scala:65:27]
  wire         rsel_43 = _rsel_T_1[43]; // @[OneHot.scala:65:27]
  wire         rsel_44 = _rsel_T_1[44]; // @[OneHot.scala:65:27]
  wire         rsel_45 = _rsel_T_1[45]; // @[OneHot.scala:65:27]
  wire         rsel_46 = _rsel_T_1[46]; // @[OneHot.scala:65:27]
  wire         rsel_47 = _rsel_T_1[47]; // @[OneHot.scala:65:27]
  wire         rsel_48 = _rsel_T_1[48]; // @[OneHot.scala:65:27]
  wire         rsel_49 = _rsel_T_1[49]; // @[OneHot.scala:65:27]
  wire         rsel_50 = _rsel_T_1[50]; // @[OneHot.scala:65:27]
  wire         rsel_51 = _rsel_T_1[51]; // @[OneHot.scala:65:27]
  wire         rsel_52 = _rsel_T_1[52]; // @[OneHot.scala:65:27]
  wire         rsel_53 = _rsel_T_1[53]; // @[OneHot.scala:65:27]
  wire         rsel_54 = _rsel_T_1[54]; // @[OneHot.scala:65:27]
  wire         rsel_55 = _rsel_T_1[55]; // @[OneHot.scala:65:27]
  wire         rsel_56 = _rsel_T_1[56]; // @[OneHot.scala:65:27]
  wire         rsel_57 = _rsel_T_1[57]; // @[OneHot.scala:65:27]
  wire         rsel_58 = _rsel_T_1[58]; // @[OneHot.scala:65:27]
  wire         rsel_59 = _rsel_T_1[59]; // @[OneHot.scala:65:27]
  wire         rsel_60 = _rsel_T_1[60]; // @[OneHot.scala:65:27]
  wire         rsel_61 = _rsel_T_1[61]; // @[OneHot.scala:65:27]
  wire         rsel_62 = _rsel_T_1[62]; // @[OneHot.scala:65:27]
  wire         rsel_63 = _rsel_T_1[63]; // @[OneHot.scala:65:27]
  wire         rsel_64 = _rsel_T_1[64]; // @[OneHot.scala:65:27]
  wire         rsel_65 = _rsel_T_1[65]; // @[OneHot.scala:65:27]
  wire         rsel_66 = _rsel_T_1[66]; // @[OneHot.scala:65:27]
  wire         rsel_67 = _rsel_T_1[67]; // @[OneHot.scala:65:27]
  wire         rsel_68 = _rsel_T_1[68]; // @[OneHot.scala:65:27]
  wire         rsel_69 = _rsel_T_1[69]; // @[OneHot.scala:65:27]
  wire         rsel_70 = _rsel_T_1[70]; // @[OneHot.scala:65:27]
  wire         rsel_71 = _rsel_T_1[71]; // @[OneHot.scala:65:27]
  wire         rsel_72 = _rsel_T_1[72]; // @[OneHot.scala:65:27]
  wire         rsel_73 = _rsel_T_1[73]; // @[OneHot.scala:65:27]
  wire         rsel_74 = _rsel_T_1[74]; // @[OneHot.scala:65:27]
  wire         rsel_75 = _rsel_T_1[75]; // @[OneHot.scala:65:27]
  wire         rsel_76 = _rsel_T_1[76]; // @[OneHot.scala:65:27]
  wire         rsel_77 = _rsel_T_1[77]; // @[OneHot.scala:65:27]
  wire         rsel_78 = _rsel_T_1[78]; // @[OneHot.scala:65:27]
  wire         rsel_79 = _rsel_T_1[79]; // @[OneHot.scala:65:27]
  wire         rsel_80 = _rsel_T_1[80]; // @[OneHot.scala:65:27]
  wire         rsel_81 = _rsel_T_1[81]; // @[OneHot.scala:65:27]
  wire         rsel_82 = _rsel_T_1[82]; // @[OneHot.scala:65:27]
  wire         rsel_83 = _rsel_T_1[83]; // @[OneHot.scala:65:27]
  wire         rsel_84 = _rsel_T_1[84]; // @[OneHot.scala:65:27]
  wire         rsel_85 = _rsel_T_1[85]; // @[OneHot.scala:65:27]
  wire         rsel_86 = _rsel_T_1[86]; // @[OneHot.scala:65:27]
  wire         rsel_87 = _rsel_T_1[87]; // @[OneHot.scala:65:27]
  wire         rsel_88 = _rsel_T_1[88]; // @[OneHot.scala:65:27]
  wire         rsel_89 = _rsel_T_1[89]; // @[OneHot.scala:65:27]
  wire         rsel_90 = _rsel_T_1[90]; // @[OneHot.scala:65:27]
  wire         rsel_91 = _rsel_T_1[91]; // @[OneHot.scala:65:27]
  wire         rsel_92 = _rsel_T_1[92]; // @[OneHot.scala:65:27]
  wire         rsel_93 = _rsel_T_1[93]; // @[OneHot.scala:65:27]
  wire         rsel_94 = _rsel_T_1[94]; // @[OneHot.scala:65:27]
  wire         rsel_95 = _rsel_T_1[95]; // @[OneHot.scala:65:27]
  wire         rsel_96 = _rsel_T_1[96]; // @[OneHot.scala:65:27]
  wire         rsel_97 = _rsel_T_1[97]; // @[OneHot.scala:65:27]
  wire         rsel_98 = _rsel_T_1[98]; // @[OneHot.scala:65:27]
  wire         rsel_99 = _rsel_T_1[99]; // @[OneHot.scala:65:27]
  wire         rsel_100 = _rsel_T_1[100]; // @[OneHot.scala:65:27]
  wire         rsel_101 = _rsel_T_1[101]; // @[OneHot.scala:65:27]
  wire         rsel_102 = _rsel_T_1[102]; // @[OneHot.scala:65:27]
  wire         rsel_103 = _rsel_T_1[103]; // @[OneHot.scala:65:27]
  wire         rsel_104 = _rsel_T_1[104]; // @[OneHot.scala:65:27]
  wire         rsel_105 = _rsel_T_1[105]; // @[OneHot.scala:65:27]
  wire         rsel_106 = _rsel_T_1[106]; // @[OneHot.scala:65:27]
  wire         rsel_107 = _rsel_T_1[107]; // @[OneHot.scala:65:27]
  wire         rsel_108 = _rsel_T_1[108]; // @[OneHot.scala:65:27]
  wire         rsel_109 = _rsel_T_1[109]; // @[OneHot.scala:65:27]
  wire         rsel_110 = _rsel_T_1[110]; // @[OneHot.scala:65:27]
  wire         rsel_111 = _rsel_T_1[111]; // @[OneHot.scala:65:27]
  wire         rsel_112 = _rsel_T_1[112]; // @[OneHot.scala:65:27]
  wire         rsel_113 = _rsel_T_1[113]; // @[OneHot.scala:65:27]
  wire         rsel_114 = _rsel_T_1[114]; // @[OneHot.scala:65:27]
  wire         rsel_115 = _rsel_T_1[115]; // @[OneHot.scala:65:27]
  wire         rsel_116 = _rsel_T_1[116]; // @[OneHot.scala:65:27]
  wire         rsel_117 = _rsel_T_1[117]; // @[OneHot.scala:65:27]
  wire         rsel_118 = _rsel_T_1[118]; // @[OneHot.scala:65:27]
  wire         rsel_119 = _rsel_T_1[119]; // @[OneHot.scala:65:27]
  wire         rsel_120 = _rsel_T_1[120]; // @[OneHot.scala:65:27]
  wire         rsel_121 = _rsel_T_1[121]; // @[OneHot.scala:65:27]
  wire         rsel_122 = _rsel_T_1[122]; // @[OneHot.scala:65:27]
  wire         rsel_123 = _rsel_T_1[123]; // @[OneHot.scala:65:27]
  wire         rsel_124 = _rsel_T_1[124]; // @[OneHot.scala:65:27]
  wire         rsel_125 = _rsel_T_1[125]; // @[OneHot.scala:65:27]
  wire         rsel_126 = _rsel_T_1[126]; // @[OneHot.scala:65:27]
  wire         rsel_127 = _rsel_T_1[127]; // @[OneHot.scala:65:27]
  wire         _T_640 = nodeOut_r_valid & nodeIn_r_ready; // @[UserYanker.scala:78:37]
  wire         _T_643 = nodeIn_ar_valid & nodeOut_ar_ready; // @[UserYanker.scala:81:37]
  wire         _aw_ready_WIRE_0; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_1; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_2; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_3; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_4; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_5; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_6; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_7; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_8; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_9; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_10; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_11; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_12; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_13; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_14; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_15; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_16; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_17; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_18; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_19; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_20; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_21; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_22; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_23; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_24; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_25; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_26; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_27; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_28; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_29; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_30; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_31; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_32; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_33; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_34; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_35; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_36; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_37; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_38; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_39; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_40; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_41; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_42; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_43; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_44; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_45; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_46; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_47; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_48; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_49; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_50; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_51; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_52; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_53; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_54; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_55; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_56; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_57; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_58; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_59; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_60; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_61; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_62; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_63; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_64; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_65; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_66; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_67; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_68; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_69; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_70; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_71; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_72; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_73; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_74; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_75; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_76; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_77; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_78; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_79; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_80; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_81; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_82; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_83; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_84; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_85; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_86; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_87; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_88; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_89; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_90; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_91; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_92; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_93; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_94; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_95; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_96; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_97; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_98; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_99; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_100; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_101; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_102; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_103; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_104; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_105; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_106; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_107; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_108; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_109; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_110; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_111; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_112; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_113; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_114; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_115; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_116; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_117; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_118; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_119; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_120; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_121; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_122; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_123; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_124; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_125; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_126; // @[UserYanker.scala:88:29]
  wire         _aw_ready_WIRE_127; // @[UserYanker.scala:88:29]
  wire [127:0] _GEN_3 = {{_aw_ready_WIRE_127}, {_aw_ready_WIRE_126}, {_aw_ready_WIRE_125}, {_aw_ready_WIRE_124}, {_aw_ready_WIRE_123}, {_aw_ready_WIRE_122}, {_aw_ready_WIRE_121}, {_aw_ready_WIRE_120}, {_aw_ready_WIRE_119}, {_aw_ready_WIRE_118}, {_aw_ready_WIRE_117}, {_aw_ready_WIRE_116}, {_aw_ready_WIRE_115}, {_aw_ready_WIRE_114}, {_aw_ready_WIRE_113}, {_aw_ready_WIRE_112}, {_aw_ready_WIRE_111}, {_aw_ready_WIRE_110}, {_aw_ready_WIRE_109}, {_aw_ready_WIRE_108}, {_aw_ready_WIRE_107}, {_aw_ready_WIRE_106}, {_aw_ready_WIRE_105}, {_aw_ready_WIRE_104}, {_aw_ready_WIRE_103}, {_aw_ready_WIRE_102}, {_aw_ready_WIRE_101}, {_aw_ready_WIRE_100}, {_aw_ready_WIRE_99}, {_aw_ready_WIRE_98}, {_aw_ready_WIRE_97}, {_aw_ready_WIRE_96}, {_aw_ready_WIRE_95}, {_aw_ready_WIRE_94}, {_aw_ready_WIRE_93}, {_aw_ready_WIRE_92}, {_aw_ready_WIRE_91}, {_aw_ready_WIRE_90}, {_aw_ready_WIRE_89}, {_aw_ready_WIRE_88}, {_aw_ready_WIRE_87}, {_aw_ready_WIRE_86}, {_aw_ready_WIRE_85}, {_aw_ready_WIRE_84}, {_aw_ready_WIRE_83}, {_aw_ready_WIRE_82}, {_aw_ready_WIRE_81}, {_aw_ready_WIRE_80}, {_aw_ready_WIRE_79}, {_aw_ready_WIRE_78}, {_aw_ready_WIRE_77}, {_aw_ready_WIRE_76}, {_aw_ready_WIRE_75}, {_aw_ready_WIRE_74}, {_aw_ready_WIRE_73}, {_aw_ready_WIRE_72}, {_aw_ready_WIRE_71}, {_aw_ready_WIRE_70}, {_aw_ready_WIRE_69}, {_aw_ready_WIRE_68}, {_aw_ready_WIRE_67}, {_aw_ready_WIRE_66}, {_aw_ready_WIRE_65}, {_aw_ready_WIRE_64}, {_aw_ready_WIRE_63}, {_aw_ready_WIRE_62}, {_aw_ready_WIRE_61}, {_aw_ready_WIRE_60}, {_aw_ready_WIRE_59}, {_aw_ready_WIRE_58}, {_aw_ready_WIRE_57}, {_aw_ready_WIRE_56}, {_aw_ready_WIRE_55}, {_aw_ready_WIRE_54}, {_aw_ready_WIRE_53}, {_aw_ready_WIRE_52}, {_aw_ready_WIRE_51}, {_aw_ready_WIRE_50}, {_aw_ready_WIRE_49}, {_aw_ready_WIRE_48}, {_aw_ready_WIRE_47}, {_aw_ready_WIRE_46}, {_aw_ready_WIRE_45}, {_aw_ready_WIRE_44}, {_aw_ready_WIRE_43}, {_aw_ready_WIRE_42}, {_aw_ready_WIRE_41}, {_aw_ready_WIRE_40}, {_aw_ready_WIRE_39}, {_aw_ready_WIRE_38}, {_aw_ready_WIRE_37}, {_aw_ready_WIRE_36}, {_aw_ready_WIRE_35}, {_aw_ready_WIRE_34}, {_aw_ready_WIRE_33}, {_aw_ready_WIRE_32}, {_aw_ready_WIRE_31}, {_aw_ready_WIRE_30}, {_aw_ready_WIRE_29}, {_aw_ready_WIRE_28}, {_aw_ready_WIRE_27}, {_aw_ready_WIRE_26}, {_aw_ready_WIRE_25}, {_aw_ready_WIRE_24}, {_aw_ready_WIRE_23}, {_aw_ready_WIRE_22}, {_aw_ready_WIRE_21}, {_aw_ready_WIRE_20}, {_aw_ready_WIRE_19}, {_aw_ready_WIRE_18}, {_aw_ready_WIRE_17}, {_aw_ready_WIRE_16}, {_aw_ready_WIRE_15}, {_aw_ready_WIRE_14}, {_aw_ready_WIRE_13}, {_aw_ready_WIRE_12}, {_aw_ready_WIRE_11}, {_aw_ready_WIRE_10}, {_aw_ready_WIRE_9}, {_aw_ready_WIRE_8}, {_aw_ready_WIRE_7}, {_aw_ready_WIRE_6}, {_aw_ready_WIRE_5}, {_aw_ready_WIRE_4}, {_aw_ready_WIRE_3}, {_aw_ready_WIRE_2}, {_aw_ready_WIRE_1}, {_aw_ready_WIRE_0}}; // @[UserYanker.scala:88:29, :89:36]
  assign _nodeIn_aw_ready_T = nodeOut_aw_ready & _GEN_3[nodeIn_aw_bits_id]; // @[UserYanker.scala:89:36]
  assign nodeIn_aw_ready = _nodeIn_aw_ready_T; // @[UserYanker.scala:89:36]
  assign _nodeOut_aw_valid_T = nodeIn_aw_valid & _GEN_3[nodeIn_aw_bits_id]; // @[UserYanker.scala:89:36, :90:36]
  assign nodeOut_aw_valid = _nodeOut_aw_valid_T; // @[UserYanker.scala:90:36]
  wire         _r_valid_WIRE_0; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_1; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_2; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_3; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_4; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_5; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_6; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_7; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_8; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_9; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_10; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_11; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_12; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_13; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_14; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_15; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_16; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_17; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_18; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_19; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_20; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_21; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_22; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_23; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_24; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_25; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_26; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_27; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_28; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_29; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_30; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_31; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_32; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_33; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_34; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_35; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_36; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_37; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_38; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_39; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_40; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_41; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_42; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_43; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_44; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_45; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_46; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_47; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_48; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_49; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_50; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_51; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_52; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_53; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_54; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_55; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_56; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_57; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_58; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_59; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_60; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_61; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_62; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_63; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_64; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_65; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_66; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_67; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_68; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_69; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_70; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_71; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_72; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_73; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_74; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_75; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_76; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_77; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_78; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_79; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_80; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_81; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_82; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_83; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_84; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_85; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_86; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_87; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_88; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_89; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_90; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_91; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_92; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_93; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_94; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_95; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_96; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_97; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_98; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_99; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_100; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_101; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_102; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_103; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_104; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_105; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_106; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_107; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_108; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_109; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_110; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_111; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_112; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_113; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_114; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_115; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_116; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_117; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_118; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_119; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_120; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_121; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_122; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_123; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_124; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_125; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_126; // @[UserYanker.scala:67:28]
  wire         _r_valid_WIRE_127; // @[UserYanker.scala:67:28]
  wire         _b_valid_WIRE_0; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_1; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_2; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_3; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_4; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_5; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_6; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_7; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_8; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_9; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_10; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_11; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_12; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_13; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_14; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_15; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_16; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_17; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_18; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_19; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_20; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_21; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_22; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_23; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_24; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_25; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_26; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_27; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_28; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_29; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_30; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_31; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_32; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_33; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_34; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_35; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_36; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_37; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_38; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_39; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_40; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_41; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_42; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_43; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_44; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_45; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_46; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_47; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_48; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_49; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_50; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_51; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_52; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_53; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_54; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_55; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_56; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_57; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_58; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_59; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_60; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_61; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_62; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_63; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_64; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_65; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_66; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_67; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_68; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_69; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_70; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_71; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_72; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_73; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_74; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_75; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_76; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_77; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_78; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_79; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_80; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_81; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_82; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_83; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_84; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_85; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_86; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_87; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_88; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_89; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_90; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_91; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_92; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_93; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_94; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_95; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_96; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_97; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_98; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_99; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_100; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_101; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_102; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_103; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_104; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_105; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_106; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_107; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_108; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_109; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_110; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_111; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_112; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_113; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_114; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_115; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_116; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_117; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_118; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_119; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_120; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_121; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_122; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_123; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_124; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_125; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_126; // @[UserYanker.scala:96:28]
  wire         _b_valid_WIRE_127; // @[UserYanker.scala:96:28]