prompt
stringlengths 162
4.26M
| response
stringlengths 109
5.16M
|
---|---|
Generate the Verilog code corresponding to this Chisel code // See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
object DescribedSRAM {
/** Build a SyncReadMem, give it a human-readable name, and attach an
  * SRAM description annotation (via Annotated.srams) for downstream tooling.
  *
  * @param name suggested instance name for the memory
  * @param desc free-form description recorded in the annotation
  * @param size depth (number of entries) of the memory
  * @param data element type stored per entry
  * @return the named, annotated SyncReadMem
  */
def apply[T <: Data](
  name: String,
  desc: String,
  size: BigInt, // depth
  data: T
): SyncReadMem[T] = {
  val mem = SyncReadMem(size, data)
  mem.suggestName(name)
  // Write-mask granularity: per-element width for a Vec (maskable), else the full width.
  val granWidth = data match {
    case v: Vec[_] => v.head.getWidth
    case d => d.getWidth
  }
  val uid = 0 // NOTE(review): unused local — presumably a leftover; confirm before removing.
  Annotated.srams(
    component = mem,
    name = name,
    address_width = log2Ceil(size),
    data_width = data.getWidth,
    depth = size,
    description = desc,
    write_mask_granularity = granWidth
  )
  mem
}
} | module array_0_0_0(
input [8:0] R0_addr,
input R0_en,
input R0_clk,
output [63:0] R0_data,
input [8:0] W0_addr,
input W0_en,
input W0_clk,
input [63:0] W0_data
);
array_0_0_0_ext array_0_0_0_ext (
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
);
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
/** This black-boxes an Async Reset
* (or Set)
* Register.
*
* Because Chisel doesn't support
* parameterized black boxes,
* we unfortunately have to
* instantiate a number of these.
*
* We also have to hard-code the set/
* reset behavior.
*
* Do not confuse an asynchronous
* reset signal with an asynchronously
* reset reg. You should still
* properly synchronize your reset
* deassertion.
*
* @param d Data input
* @param q Data Output
* @param clk Clock Input
* @param rst Reset Input
* @param en Write Enable Input
*
*/
class AsyncResetReg(resetValue: Int = 0) extends RawModule {
  // RawModule has no implicit clock/reset, so both are explicit ports here.
  val io = IO(new Bundle {
    val d = Input(Bool())      // next value
    val q = Output(Bool())     // current value
    val en = Input(Bool())     // write enable
    val clk = Input(Clock())
    val rst = Input(Reset())
  })
  // 1-bit register, asynchronously reset to resetValue, clocked by io.clk.
  val reg = withClockAndReset(io.clk, io.rst.asAsyncReset)(RegInit(resetValue.U(1.W)))
  when (io.en) {
    reg := io.d
  }
  io.q := reg
}
/** Simple d/q/en register interface of width w. */
class SimpleRegIO(val w: Int) extends Bundle{
  val d = Input(UInt(w.W))   // next value, captured when en is high
  val q = Output(UInt(w.W))  // current register value
  val en = Input(Bool())     // write enable
}
class AsyncResetRegVec(val w: Int, val init: BigInt) extends Module {
  // Encode width and reset value in the module name so each configuration
  // elaborates as a distinct Verilog module.
  override def desiredName = s"AsyncResetRegVec_w${w}_i${init}"
  val io = IO(new SimpleRegIO(w))
  // w-bit register with an asynchronous reset to `init` (uses the implicit clock).
  val reg = withReset(reset.asAsyncReset)(RegInit(init.U(w.W)))
  when (io.en) {
    reg := io.d
  }
  io.q := reg
}
object AsyncResetReg {
// Create Single Registers
/** Single async-reset register with explicit clock/reset, always enabled. */
def apply(d: Bool, clk: Clock, rst: Bool, init: Boolean, name: Option[String]): Bool = {
  val reg = Module(new AsyncResetReg(if (init) 1 else 0))
  reg.io.d := d
  reg.io.clk := clk
  reg.io.rst := rst
  reg.io.en := true.B // always capture; gating is the caller's job
  name.foreach(reg.suggestName(_))
  reg.io.q
}
def apply(d: Bool, clk: Clock, rst: Bool): Bool = apply(d, clk, rst, false, None)
def apply(d: Bool, clk: Clock, rst: Bool, name: String): Bool = apply(d, clk, rst, false, Some(name))
// Create Vectors of Registers
/** Vector of async-reset registers; width covers both the update data and
  * the reset value. Uses the caller's implicit clock and reset. */
def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: Option[String] = None): UInt = {
  val w = updateData.getWidth max resetData.bitLength
  val reg = Module(new AsyncResetRegVec(w, resetData))
  name.foreach(reg.suggestName(_))
  reg.io.d := updateData
  reg.io.en := enable
  reg.io.q
}
def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: String): UInt = apply(updateData,
resetData, enable, Some(name))
def apply(updateData: UInt, resetData: BigInt): UInt = apply(updateData, resetData, enable = true.B)
def apply(updateData: UInt, resetData: BigInt, name: String): UInt = apply(updateData, resetData, enable = true.B, Some(name))
def apply(updateData: UInt, enable: Bool): UInt = apply(updateData, resetData=BigInt(0), enable)
def apply(updateData: UInt, enable: Bool, name: String): UInt = apply(updateData, resetData = BigInt(0), enable, Some(name))
def apply(updateData: UInt): UInt = apply(updateData, resetData = BigInt(0), enable = true.B)
def apply(updateData: UInt, name:String): UInt = apply(updateData, resetData = BigInt(0), enable = true.B, Some(name))
} | module AsyncResetRegVec_w1_i1(
input clock,
input reset,
input io_d,
output io_q,
input io_en
);
reg reg_0;
always @(posedge clock or posedge reset) begin
if (reset)
reg_0 <= 1'h1;
else if (io_en)
reg_0 <= io_d;
end
assign io_q = reg_0;
endmodule |
Generate the Verilog code corresponding to this Chisel code //******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v3.common.{MicroOp}
import boom.v3.exu.{BrUpdateInfo}
/**
* Object to XOR fold a input register of fullLength into a compressedLength.
*/
object Fold
{
  /** XOR-fold `input` (fullLength bits) down to compressedLength bits by
    * XOR-ing successive compressedLength-wide slices together.
    * Returns the input unchanged when it already fits. */
  def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
    val clen = compressedLength
    val hlen = fullLength
    if (hlen <= clen) {
      input
    } else {
      // Walk the input in clen-sized chunks, XOR-accumulating into res.
      var res = 0.U(clen.W)
      var remaining = input.asUInt
      for (i <- 0 to hlen-1 by clen) {
        // The final chunk may be narrower than clen.
        val len = if (i + clen > hlen ) (hlen - i) else clen
        require(len > 0)
        res = res(clen-1,0) ^ remaining(len-1,0)
        remaining = remaining >> len.U
      }
      res
    }
  }
}
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
  /** True if the resolving branch's mispredict mask overlaps the uop's branch mask. */
  def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool =
    maskMatch(brupdate.b1.mispredict_mask, uop.br_mask)

  /** Same check, given a raw branch mask instead of a MicroOp. */
  def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool =
    maskMatch(brupdate.b1.mispredict_mask, uop_mask)
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
  /** Copy of `uop` with this cycle's resolved branches cleared from its branch mask. */
  def apply(uop: MicroOp, brupdate: BrUpdateInfo)
    (implicit p: Parameters): MicroOp = {
    val newuop = WireInit(uop)
    newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
    newuop
  }
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
  /** Clear this cycle's resolved branch bits from the uop's branch mask. */
  def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt =
    uop.br_mask & ~brupdate.b1.resolve_mask

  /** Clear this cycle's resolved branch bits from a raw branch mask. */
  def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt =
    br_mask & ~brupdate.b1.resolve_mask
}
object UpdateBrMask
{
  /** Copy of `uop` with its br_mask updated for this cycle's branch resolution. */
  def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
    val out = WireInit(uop)
    out.br_mask := GetNewBrMask(brupdate, uop)
    out
  }
  /** Same, for any bundle carrying a uop. */
  def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
    val out = WireInit(bundle)
    out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
    out
  }
  /** Same, for a Valid bundle: additionally drops valid when the uop is killed. */
  def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = {
    val out = WireInit(bundle)
    out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
    out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask)
    out
  }
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
  /** True when the two masks share at least one set bit. */
  def apply(msk1: UInt, msk2: UInt): Bool = {
    val overlap = msk1 & msk2
    overlap.orR
  }
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
  /** Clear bit `idx` of `msk`, truncated back to the original width. */
  def apply(msk: UInt, idx: UInt): UInt = {
    val cleared = msk & ~(1.U << idx)
    cleared(msk.getWidth - 1, 0)
  }
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
  /** Shift reg_val left by one, shifting new_bit in at the LSB.
    * NOTE(review): the Cat result is one bit wider than reg_val, so the
    * `:=` assignment truncates the old MSB; it also connects to a function
    * argument, which requires the caller to pass a connectable hardware
    * node — confirm intended usage at call sites. */
  def apply(reg_val: UInt, new_bit: Bool): UInt = {
    reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
    reg_val
  }
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
  def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
    // Top bit of the compressed register wraps around to the bottom.
    val carry = csr(clen-1)
    // Shift in (new_bit XOR carry) at the LSB, and XOR out the evicted bit
    // at the position where the folded hlen-wide register wraps (hlen % clen).
    val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
    newval
  }
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
  // "n" is the number of increments, so we wrap at n-1.
  /** value + amt modulo n. Assumes amt <= n so that a single subtraction
    * suffices in the non-power-of-2 case — TODO confirm against callers. */
  def apply(value: UInt, amt: UInt, n: Int): UInt = {
    if (isPow2(n)) {
      // Power-of-2 n: truncating to log2(n) bits implements the wrap for free.
      (value + amt)(log2Ceil(n)-1,0)
    } else {
      // Widen by one bit to avoid overflow, then subtract n once on wrap.
      val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
      Mux(sum >= n.U,
          sum - n.U,
          sum)
    }
  }
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
  // "n" is the number of increments, so we wrap to n-1.
  /** value - amt modulo n (amt is an elaboration-time constant).
    * Assumes value < n and amt <= n so one conditional add-back suffices. */
  def apply(value: UInt, amt: Int, n: Int): UInt = {
    if (isPow2(n)) {
      // Power-of-2 n: truncating to log2(n) bits implements the wrap.
      (value - amt.U)(log2Ceil(n)-1,0)
    } else {
      // Add n back when the subtraction would underflow.
      // (The previous revision computed two widened copies of the operands
      // that were never used; they have been removed.)
      Mux(value >= amt.U,
          value - amt.U,
          n.U - amt.U + value)
    }
  }
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
  // "n" is the number of increments, so we wrap at n-1.
  /** Increment `value` modulo n. */
  def apply(value: UInt, n: Int): UInt = {
    if (isPow2(n)) {
      // Truncation to log2(n) bits performs the wrap.
      (value + 1.U)(log2Ceil(n) - 1, 0)
    } else {
      val atLast = value === (n - 1).U
      Mux(atLast, 0.U, value + 1.U)
    }
  }
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
  // "n" is the number of increments, so we wrap at n-1.
  /** Decrement `value` modulo n. */
  def apply(value: UInt, n: Int): UInt = {
    if (isPow2(n)) {
      // Truncation to log2(n) bits performs the wrap.
      (value - 1.U)(log2Ceil(n) - 1, 0)
    } else {
      val atZero = value === 0.U
      Mux(atZero, (n - 1).U, value - 1.U)
    }
  }
}
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
  /** Clear the low log2(b) bits of pc, aligning it to a b-byte boundary. */
  def apply(pc: UInt, b: Int): UInt = {
    // Invert for scenario where pc longer than b
    // (which would clear all bits above size(b)).
    // i.e. double inversion preserves pc's upper bits regardless of the
    // relative widths of pc and (b-1).
    ~(~pc | (b-1).U)
  }
}
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
  /** Rotate `signal` left by one bit: the MSB wraps around to the LSB. */
  def apply(signal: UInt): UInt = {
    val w = signal.getWidth
    // Requires w >= 2 (slice w-2 downto 0), same as the original behavior.
    Cat(signal(w-2, 0), signal(w-1))
  }
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
  /** Sign-extend `x` to `length` bits (no-op when already that width).
    * Assumes x.getWidth <= length — Fill with a negative count would
    * fail elaboration otherwise, as in the original. */
  def apply(x: UInt, length: Int): UInt =
    if (x.getWidth == length) x
    else Cat(Fill(length - x.getWidth, x(x.getWidth - 1)), x)
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
  import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U}
  /** Expand BOOM's packed immediate into a sign-extended immediate.
    * Each field is muxed per the selected RISC-V format (I/S/B/U/J);
    * U-type results come out shifted up 12 bits. */
  def apply(ip: UInt, isel: UInt): SInt = {
    val sign = ip(LONGEST_IMM_SZ-1).asSInt // packed sign bit, replicated into unused fields
    val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
    val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
    val i11 = Mux(isel === IS_U, 0.S,
              Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
    val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
    val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
    val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
    // Reassemble the immediate fields and reinterpret as signed.
    return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt
  }
}
/**
* Object to get the FP rounding mode out of a packed immediate.
*/
object ImmGenRm { def apply(ip: UInt): UInt = ip(2,0) }
/**
* Object to get the FP function fype from a packed immediate.
* Note: only works if !(IS_B or IS_S)
*/
object ImmGenTyp { def apply(ip: UInt): UInt = ip(9,8) }
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
  /** True when the instruction's opcode field is JALR (1100111). */
  def apply(inst: UInt): Bool = {
    // TODO Chisel not sure why this won't compile
    // val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
    // Array(
    // JALR -> Bool(true)))
    inst(6,0) === "b1100111".U
  }
}
/**
* Object to take an instruction and output its branch or jal target. Only used
* for a debug assert (no where else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
  /** Debug-only: decode a branch/JAL target offset straight from instruction bits. */
  def apply(inst: UInt): UInt = {
    // TODO Chisel not sure why this won't compile
    //val csignals =
    //rocket.DecodeLogic(inst,
    // List(Bool(false), Bool(false)),
    // Array(
    // BEQ -> List(Bool(true ), Bool(false)),
    // BNE -> List(Bool(true ), Bool(false)),
    // BGE -> List(Bool(true ), Bool(false)),
    // BGEU -> List(Bool(true ), Bool(false)),
    // BLT -> List(Bool(true ), Bool(false)),
    // BLTU -> List(Bool(true ), Bool(false))
    // ))
    //val is_br :: nothing :: Nil = csignals
    // Branch opcode is 1100011; anything else is treated as a JAL-format immediate.
    val is_br = (inst(6,0) === "b1100011".U)
    // Sign-extended B-type and J-type immediates reassembled from inst fields.
    val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
    val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
    Mux(is_br, br_targ, jal_targ)
  }
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
  /** Index of the first set bit at or after `head` (age order),
    * falling back to the first set bit anywhere if none qualifies. */
  def apply(in: Seq[Bool], head: UInt): UInt = {
    val n = in.size
    val width = log2Ceil(in.size)
    val n_padded = 1 << width
    // First half: entries at/after head (higher priority), padded to a power
    // of 2; second half: the raw vector, used only as the wrap-around fallback.
    val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
    val idx = PriorityEncoder(temp_vec)
    idx(width-1, 0) //discard msb
  }
}
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
  // In a circular buffer, i0 is older than i1 iff an odd number of
  // {i0 < i1, i0 is below head, i1 is below head} hold (wrap parity trick).
  def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
  /** Set every bit at or below the highest-order set bit of `in`. */
  def apply(in: UInt) = {
    val n = in.getWidth
    // OR together all right-shifted copies of the input.
    Seq.tabulate(n)(i => in >> i.U).reduce(_ | _)
  }
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
  /** Set every bit at or above the lowest-order set bit of `in`. */
  def apply(in: UInt) = {
    val n = in.getWidth
    // OR together all left-shifted copies of the input, truncated to width n.
    Seq.tabulate(n)(i => (in << i.U)(n - 1, 0)).reduce(_ | _)
  }
}
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
  /** Transpose a matrix of Chisel Vecs (assumes all rows have row-0's length). */
  def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
    val n = in(0).size
    VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
  }
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
  /** One-hot selects for the n lowest set bits of `in`, lowest first.
    * If fewer than n bits are set, the remaining selects are zero. */
  def apply(in: UInt, n: Int) = {
    val sels = Wire(Vec(n, UInt(in.getWidth.W)))
    var mask = in
    for (i <- 0 until n) {
      sels(i) := PriorityEncoderOH(mask)
      // Clear the bit just selected so the next pass finds the next one.
      mask = mask & ~sels(i)
    }
    sels
  }
}
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
  require(n >= k)
  val io = IO(new Bundle {
    val in = Vec(n, Flipped(DecoupledIO(gen)))
    val out = Vec(k, DecoupledIO(gen))
  })
  if (n == k) {
    // Trivial case: wire straight through.
    io.out <> io.in
  } else {
    // counts(i): one-hot (within k) output slot input i would occupy,
    // built by shifting past each earlier valid input (saturates via truncation).
    val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
    // sels(j)(i): input i is the j-th valid input.
    val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
      (col zip io.in.map(_.valid)) map {case (c,v) => c && v})
    // An input is ready when the output slot it maps to is ready.
    val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
    val out_valids = sels map (col => col.reduce(_||_))
    val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
    in_readys zip io.in foreach {case (r,i) => i.ready := r}
    out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
  }
}
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true)
  (implicit p: org.chipsalliance.cde.config.Parameters)
  extends boom.v3.common.BoomModule()(p)
  with boom.v3.common.HasBoomCoreParameters
{
  val io = IO(new Bundle {
    val enq = Flipped(Decoupled(gen))
    val deq = Decoupled(gen)
    val brupdate = Input(new BrUpdateInfo())
    val flush = Input(Bool())
    val empty = Output(Bool())
    val count = Output(UInt(log2Ceil(entries).W))
  })
  // Payload lives in a Mem; uops are kept in registers so their br_masks
  // can be updated in place every cycle.
  val ram = Mem(entries, gen)
  val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
  val uops = Reg(Vec(entries, new MicroOp))
  val enq_ptr = Counter(entries)
  val deq_ptr = Counter(entries)
  val maybe_full = RegInit(false.B)
  // Standard circular-queue full/empty disambiguation via maybe_full.
  val ptr_match = enq_ptr.value === deq_ptr.value
  io.empty := ptr_match && !maybe_full
  val full = ptr_match && maybe_full
  val do_enq = WireInit(io.enq.fire)
  // A killed (invalid) head entry is silently dropped even without deq.ready.
  val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
  // Every cycle: kill entries hit by a mispredict or flush, and clear
  // resolved branch bits from surviving entries' masks.
  for (i <- 0 until entries) {
    val mask = uops(i).br_mask
    val uop = uops(i)
    valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop))
    when (valids(i)) {
      uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
    }
  }
  // Enqueue: the stored br_mask already has this cycle's resolution applied.
  when (do_enq) {
    ram(enq_ptr.value) := io.enq.bits
    valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop)
    uops(enq_ptr.value) := io.enq.bits.uop
    uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
    enq_ptr.inc()
  }
  when (do_deq) {
    valids(deq_ptr.value) := false.B
    deq_ptr.inc()
  }
  when (do_enq =/= do_deq) {
    maybe_full := do_enq
  }
  io.enq.ready := !full
  // Head entry: payload from ram, live uop (with current br_mask) from uops.
  val out = Wire(gen)
  out := ram(deq_ptr.value)
  out.uop := uops(deq_ptr.value)
  io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop))
  io.deq.bits := out
  io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, out.uop)
  // For flow queue behavior: when empty, pass enq straight to deq, and
  // suppress the ram write if the consumer takes it this cycle.
  if (flow) {
    when (io.empty) {
      io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop)
      io.deq.bits := io.enq.bits
      io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
      do_deq := false.B
      when (io.deq.ready) { do_enq := false.B }
    }
  }
  // Occupancy count; the non-power-of-2 case must handle pointer wrap explicitly.
  private val ptr_diff = enq_ptr.value - deq_ptr.value
  if (isPow2(entries)) {
    io.count := Cat(maybe_full && ptr_match, ptr_diff)
  }
  else {
    io.count := Mux(ptr_match,
                    Mux(maybe_full,
                        entries.asUInt, 0.U),
                    Mux(deq_ptr.value > enq_ptr.value,
                        entries.asUInt + ptr_diff, ptr_diff))
  }
}
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
  /**
   * Take in a Chisel Bool and convert it into a Str
   * based on the Chars given
   *
   * @param c_bool Chisel Bool
   * @param trueChar Scala Char if bool is true
   * @param falseChar Scala Char if bool is false
   * @return UInt ASCII Char for "trueChar" or "falseChar"
   */
  def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
    val onChar  = Str(trueChar)
    val offChar = Str(falseChar)
    Mux(c_bool, onChar, offChar)
  }
}
object CfiTypeToChars
{
  /**
   * Get a Vec of Strs that can be used for printing
   *
   * @param cfi_type specific cfi type
   * @return Vec of Strs (must be indexed to get specific char)
   */
  def apply(cfi_type: UInt) = {
    val strings = Seq("----", "BR ", "JAL ", "JALR")
    val multiVec = VecInit(strings.map(s => VecInit(s.map(c => Str(c)))))
    multiVec(cfi_type)
  }
}
object BpdTypeToChars
{
  /**
   * Get a Vec of Strs that can be used for printing
   *
   * @param bpd_type specific bpd type
   * @return Vec of Strs (must be indexed to get specific char)
   */
  def apply(bpd_type: UInt) = {
    val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
    val multiVec = VecInit(strings.map(s => VecInit(s.map(c => Str(c)))))
    multiVec(bpd_type)
  }
}
object RobTypeToChars
{
  /**
   * Get a Vec of Strs that can be used for printing
   *
   * @param rob_type specific rob type
   * @return Vec of Strs (must be indexed to get specific char)
   */
  def apply(rob_type: UInt) = {
    val strings = Seq("RST", "NML", "RBK", " WT")
    val multiVec = VecInit(strings.map(s => VecInit(s.map(c => Str(c)))))
    multiVec(rob_type)
  }
}
object XRegToChars
{
  /**
   * Get a Vec of Strs that can be used for printing
   *
   * @param xreg specific register number
   * @return Vec of Strs (must be indexed to get specific char)
   */
  def apply(xreg: UInt) = {
    val strings = Seq(" x0", " ra", " sp", " gp",
                      " tp", " t0", " t1", " t2",
                      " s0", " s1", " a0", " a1",
                      " a2", " a3", " a4", " a5",
                      " a6", " a7", " s2", " s3",
                      " s4", " s5", " s6", " s7",
                      " s8", " s9", "s10", "s11",
                      " t3", " t4", " t5", " t6")
    val multiVec = VecInit(strings.map(s => VecInit(s.map(c => Str(c)))))
    multiVec(xreg)
  }
}
object FPRegToChars
{
  /**
   * Get a Vec of Strs that can be used for printing
   *
   * @param fpreg specific register number
   * @return Vec of Strs (must be indexed to get specific char)
   */
  def apply(fpreg: UInt) = {
    val strings = Seq(" ft0", " ft1", " ft2", " ft3",
                      " ft4", " ft5", " ft6", " ft7",
                      " fs0", " fs1", " fa0", " fa1",
                      " fa2", " fa3", " fa4", " fa5",
                      " fa6", " fa7", " fs2", " fs3",
                      " fs4", " fs5", " fs6", " fs7",
                      " fs8", " fs9", "fs10", "fs11",
                      " ft8", " ft9", "ft10", "ft11")
    val multiVec = VecInit(strings.map(s => VecInit(s.map(c => Str(c)))))
    multiVec(fpreg)
  }
}
object BoomCoreStringPrefix
{
  /**
   * Add prefix to BOOM strings (currently only adds the hartId)
   *
   * @param strs list of strings
   * @return String combining the list with the prefix per line
   */
  def apply(strs: String*)(implicit p: Parameters) = {
    val prefix = s"[C${p(TileKey).tileId}] "
    strs.map(str => s"$prefix$str\n").mkString
  }
}
} | module BranchKillableQueue_2(
input clock,
input reset,
output io_enq_ready,
input io_enq_valid,
input [6:0] io_enq_bits_uop_uopc,
input [31:0] io_enq_bits_uop_inst,
input [31:0] io_enq_bits_uop_debug_inst,
input io_enq_bits_uop_is_rvc,
input [39:0] io_enq_bits_uop_debug_pc,
input [2:0] io_enq_bits_uop_iq_type,
input [9:0] io_enq_bits_uop_fu_code,
input [3:0] io_enq_bits_uop_ctrl_br_type,
input [1:0] io_enq_bits_uop_ctrl_op1_sel,
input [2:0] io_enq_bits_uop_ctrl_op2_sel,
input [2:0] io_enq_bits_uop_ctrl_imm_sel,
input [4:0] io_enq_bits_uop_ctrl_op_fcn,
input io_enq_bits_uop_ctrl_fcn_dw,
input [2:0] io_enq_bits_uop_ctrl_csr_cmd,
input io_enq_bits_uop_ctrl_is_load,
input io_enq_bits_uop_ctrl_is_sta,
input io_enq_bits_uop_ctrl_is_std,
input [1:0] io_enq_bits_uop_iw_state,
input io_enq_bits_uop_iw_p1_poisoned,
input io_enq_bits_uop_iw_p2_poisoned,
input io_enq_bits_uop_is_br,
input io_enq_bits_uop_is_jalr,
input io_enq_bits_uop_is_jal,
input io_enq_bits_uop_is_sfb,
input [7:0] io_enq_bits_uop_br_mask,
input [2:0] io_enq_bits_uop_br_tag,
input [3:0] io_enq_bits_uop_ftq_idx,
input io_enq_bits_uop_edge_inst,
input [5:0] io_enq_bits_uop_pc_lob,
input io_enq_bits_uop_taken,
input [19:0] io_enq_bits_uop_imm_packed,
input [11:0] io_enq_bits_uop_csr_addr,
input [4:0] io_enq_bits_uop_rob_idx,
input [2:0] io_enq_bits_uop_ldq_idx,
input [2:0] io_enq_bits_uop_stq_idx,
input [1:0] io_enq_bits_uop_rxq_idx,
input [5:0] io_enq_bits_uop_pdst,
input [5:0] io_enq_bits_uop_prs1,
input [5:0] io_enq_bits_uop_prs2,
input [5:0] io_enq_bits_uop_prs3,
input [3:0] io_enq_bits_uop_ppred,
input io_enq_bits_uop_prs1_busy,
input io_enq_bits_uop_prs2_busy,
input io_enq_bits_uop_prs3_busy,
input io_enq_bits_uop_ppred_busy,
input [5:0] io_enq_bits_uop_stale_pdst,
input io_enq_bits_uop_exception,
input [63:0] io_enq_bits_uop_exc_cause,
input io_enq_bits_uop_bypassable,
input [4:0] io_enq_bits_uop_mem_cmd,
input [1:0] io_enq_bits_uop_mem_size,
input io_enq_bits_uop_mem_signed,
input io_enq_bits_uop_is_fence,
input io_enq_bits_uop_is_fencei,
input io_enq_bits_uop_is_amo,
input io_enq_bits_uop_uses_ldq,
input io_enq_bits_uop_uses_stq,
input io_enq_bits_uop_is_sys_pc2epc,
input io_enq_bits_uop_is_unique,
input io_enq_bits_uop_flush_on_commit,
input io_enq_bits_uop_ldst_is_rs1,
input [5:0] io_enq_bits_uop_ldst,
input [5:0] io_enq_bits_uop_lrs1,
input [5:0] io_enq_bits_uop_lrs2,
input [5:0] io_enq_bits_uop_lrs3,
input io_enq_bits_uop_ldst_val,
input [1:0] io_enq_bits_uop_dst_rtype,
input [1:0] io_enq_bits_uop_lrs1_rtype,
input [1:0] io_enq_bits_uop_lrs2_rtype,
input io_enq_bits_uop_frs3_en,
input io_enq_bits_uop_fp_val,
input io_enq_bits_uop_fp_single,
input io_enq_bits_uop_xcpt_pf_if,
input io_enq_bits_uop_xcpt_ae_if,
input io_enq_bits_uop_xcpt_ma_if,
input io_enq_bits_uop_bp_debug_if,
input io_enq_bits_uop_bp_xcpt_if,
input [1:0] io_enq_bits_uop_debug_fsrc,
input [1:0] io_enq_bits_uop_debug_tsrc,
input [63:0] io_enq_bits_data,
input io_enq_bits_is_hella,
input io_deq_ready,
output io_deq_valid,
output [7:0] io_deq_bits_uop_br_mask,
output [2:0] io_deq_bits_uop_ldq_idx,
output [2:0] io_deq_bits_uop_stq_idx,
output io_deq_bits_uop_is_amo,
output io_deq_bits_uop_uses_ldq,
output io_deq_bits_uop_uses_stq,
output [63:0] io_deq_bits_data,
output io_deq_bits_is_hella,
input [7:0] io_brupdate_b1_resolve_mask,
input [7:0] io_brupdate_b1_mispredict_mask,
input io_flush
);
wire [64:0] _ram_ext_R0_data;
reg valids_0;
reg valids_1;
reg valids_2;
reg valids_3;
reg [7:0] uops_0_br_mask;
reg [2:0] uops_0_ldq_idx;
reg [2:0] uops_0_stq_idx;
reg uops_0_is_amo;
reg uops_0_uses_ldq;
reg uops_0_uses_stq;
reg [7:0] uops_1_br_mask;
reg [2:0] uops_1_ldq_idx;
reg [2:0] uops_1_stq_idx;
reg uops_1_is_amo;
reg uops_1_uses_ldq;
reg uops_1_uses_stq;
reg [7:0] uops_2_br_mask;
reg [2:0] uops_2_ldq_idx;
reg [2:0] uops_2_stq_idx;
reg uops_2_is_amo;
reg uops_2_uses_ldq;
reg uops_2_uses_stq;
reg [7:0] uops_3_br_mask;
reg [2:0] uops_3_ldq_idx;
reg [2:0] uops_3_stq_idx;
reg uops_3_is_amo;
reg uops_3_uses_ldq;
reg uops_3_uses_stq;
reg [1:0] enq_ptr_value;
reg [1:0] deq_ptr_value;
reg maybe_full;
wire ptr_match = enq_ptr_value == deq_ptr_value;
wire io_empty = ptr_match & ~maybe_full;
wire full = ptr_match & maybe_full;
wire do_enq = ~full & io_enq_valid;
wire [3:0] _GEN = {{valids_3}, {valids_2}, {valids_1}, {valids_0}};
wire _GEN_0 = _GEN[deq_ptr_value];
wire [3:0][7:0] _GEN_1 = {{uops_3_br_mask}, {uops_2_br_mask}, {uops_1_br_mask}, {uops_0_br_mask}};
wire [7:0] out_uop_br_mask = _GEN_1[deq_ptr_value];
wire [3:0][2:0] _GEN_2 = {{uops_3_ldq_idx}, {uops_2_ldq_idx}, {uops_1_ldq_idx}, {uops_0_ldq_idx}};
wire [3:0][2:0] _GEN_3 = {{uops_3_stq_idx}, {uops_2_stq_idx}, {uops_1_stq_idx}, {uops_0_stq_idx}};
wire [3:0] _GEN_4 = {{uops_3_is_amo}, {uops_2_is_amo}, {uops_1_is_amo}, {uops_0_is_amo}};
wire [3:0] _GEN_5 = {{uops_3_uses_ldq}, {uops_2_uses_ldq}, {uops_1_uses_ldq}, {uops_0_uses_ldq}};
wire out_uop_uses_ldq = _GEN_5[deq_ptr_value];
wire [3:0] _GEN_6 = {{uops_3_uses_stq}, {uops_2_uses_stq}, {uops_1_uses_stq}, {uops_0_uses_stq}};
wire do_deq = (io_deq_ready | ~_GEN_0) & ~io_empty;
wire _GEN_7 = enq_ptr_value == 2'h0;
wire _GEN_8 = do_enq & _GEN_7;
wire _GEN_9 = enq_ptr_value == 2'h1;
wire _GEN_10 = do_enq & _GEN_9;
wire _GEN_11 = enq_ptr_value == 2'h2;
wire _GEN_12 = do_enq & _GEN_11;
wire _GEN_13 = do_enq & (&enq_ptr_value);
wire [7:0] _uops_br_mask_T_1 = io_enq_bits_uop_br_mask & ~io_brupdate_b1_resolve_mask;
always @(posedge clock) begin
if (reset) begin
valids_0 <= 1'h0;
valids_1 <= 1'h0;
valids_2 <= 1'h0;
valids_3 <= 1'h0;
enq_ptr_value <= 2'h0;
deq_ptr_value <= 2'h0;
maybe_full <= 1'h0;
end
else begin
valids_0 <= ~(do_deq & deq_ptr_value == 2'h0) & (_GEN_8 | valids_0 & (io_brupdate_b1_mispredict_mask & uops_0_br_mask) == 8'h0 & ~(io_flush & uops_0_uses_ldq));
valids_1 <= ~(do_deq & deq_ptr_value == 2'h1) & (_GEN_10 | valids_1 & (io_brupdate_b1_mispredict_mask & uops_1_br_mask) == 8'h0 & ~(io_flush & uops_1_uses_ldq));
valids_2 <= ~(do_deq & deq_ptr_value == 2'h2) & (_GEN_12 | valids_2 & (io_brupdate_b1_mispredict_mask & uops_2_br_mask) == 8'h0 & ~(io_flush & uops_2_uses_ldq));
valids_3 <= ~(do_deq & (&deq_ptr_value)) & (_GEN_13 | valids_3 & (io_brupdate_b1_mispredict_mask & uops_3_br_mask) == 8'h0 & ~(io_flush & uops_3_uses_ldq));
if (do_enq)
enq_ptr_value <= enq_ptr_value + 2'h1;
if (do_deq)
deq_ptr_value <= deq_ptr_value + 2'h1;
if (~(do_enq == do_deq))
maybe_full <= do_enq;
end
uops_0_br_mask <= do_enq & _GEN_7 ? _uops_br_mask_T_1 : ({8{~valids_0}} | ~io_brupdate_b1_resolve_mask) & uops_0_br_mask;
if (_GEN_8) begin
uops_0_ldq_idx <= io_enq_bits_uop_ldq_idx;
uops_0_stq_idx <= io_enq_bits_uop_stq_idx;
uops_0_is_amo <= io_enq_bits_uop_is_amo;
uops_0_uses_ldq <= io_enq_bits_uop_uses_ldq;
uops_0_uses_stq <= io_enq_bits_uop_uses_stq;
end
uops_1_br_mask <= do_enq & _GEN_9 ? _uops_br_mask_T_1 : ({8{~valids_1}} | ~io_brupdate_b1_resolve_mask) & uops_1_br_mask;
if (_GEN_10) begin
uops_1_ldq_idx <= io_enq_bits_uop_ldq_idx;
uops_1_stq_idx <= io_enq_bits_uop_stq_idx;
uops_1_is_amo <= io_enq_bits_uop_is_amo;
uops_1_uses_ldq <= io_enq_bits_uop_uses_ldq;
uops_1_uses_stq <= io_enq_bits_uop_uses_stq;
end
uops_2_br_mask <= do_enq & _GEN_11 ? _uops_br_mask_T_1 : ({8{~valids_2}} | ~io_brupdate_b1_resolve_mask) & uops_2_br_mask;
if (_GEN_12) begin
uops_2_ldq_idx <= io_enq_bits_uop_ldq_idx;
uops_2_stq_idx <= io_enq_bits_uop_stq_idx;
uops_2_is_amo <= io_enq_bits_uop_is_amo;
uops_2_uses_ldq <= io_enq_bits_uop_uses_ldq;
uops_2_uses_stq <= io_enq_bits_uop_uses_stq;
end
uops_3_br_mask <= do_enq & (&enq_ptr_value) ? _uops_br_mask_T_1 : ({8{~valids_3}} | ~io_brupdate_b1_resolve_mask) & uops_3_br_mask;
if (_GEN_13) begin
uops_3_ldq_idx <= io_enq_bits_uop_ldq_idx;
uops_3_stq_idx <= io_enq_bits_uop_stq_idx;
uops_3_is_amo <= io_enq_bits_uop_is_amo;
uops_3_uses_ldq <= io_enq_bits_uop_uses_ldq;
uops_3_uses_stq <= io_enq_bits_uop_uses_stq;
end
end
ram_4x65 ram_ext (
.R0_addr (deq_ptr_value),
.R0_en (1'h1),
.R0_clk (clock),
.R0_data (_ram_ext_R0_data),
.W0_addr (enq_ptr_value),
.W0_en (do_enq),
.W0_clk (clock),
.W0_data ({io_enq_bits_is_hella, io_enq_bits_data})
);
assign io_enq_ready = ~full;
assign io_deq_valid = ~io_empty & _GEN_0 & (io_brupdate_b1_mispredict_mask & out_uop_br_mask) == 8'h0 & ~(io_flush & out_uop_uses_ldq);
assign io_deq_bits_uop_br_mask = out_uop_br_mask & ~io_brupdate_b1_resolve_mask;
assign io_deq_bits_uop_ldq_idx = _GEN_2[deq_ptr_value];
assign io_deq_bits_uop_stq_idx = _GEN_3[deq_ptr_value];
assign io_deq_bits_uop_is_amo = _GEN_4[deq_ptr_value];
assign io_deq_bits_uop_uses_ldq = out_uop_uses_ldq;
assign io_deq_bits_uop_uses_stq = _GEN_6[deq_ptr_value];
assign io_deq_bits_data = _ram_ext_R0_data[63:0];
assign io_deq_bits_is_hella = _ram_ext_R0_data[64];
endmodule |
Generate the Verilog code corresponding to this Chisel code //******************************************************************************
// Copyright (c) 2013 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Register File (Abstract class and Synthesizable RegFile)
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.exu
import scala.collection.mutable.ArrayBuffer
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v3.common._
import boom.v3.util.{BoomCoreStringPrefix}
/**
* IO bundle for a register read port
*
* @param addrWidth size of register address in bits
* @param dataWidth size of register in bits
*/
class RegisterFileReadPortIO(val addrWidth: Int, val dataWidth: Int)(implicit p: Parameters) extends BoomBundle
{
  val addr = Input(UInt(addrWidth.W))   // physical register to read
  val data = Output(UInt(dataWidth.W))  // read result
}
/**
* IO bundle for the register write port
*
* @param addrWidth size of register address in bits
* @param dataWidth size of register in bits
*/
class RegisterFileWritePort(val addrWidth: Int, val dataWidth: Int)(implicit p: Parameters) extends BoomBundle
{
  val addr = UInt(addrWidth.W)  // physical register to write
  val data = UInt(dataWidth.W)  // value to write
}
/**
* Utility function to turn ExeUnitResps to match the regfile's WritePort I/Os.
*/
object WritePort
{
  /** Adapt an ExeUnitResp Decoupled interface into a regfile write port.
    * The port is valid only when the uop's destination register type
    * matches `rtype`; the response is always consumed (ready tied high). */
  def apply(enq: DecoupledIO[ExeUnitResp], addrWidth: Int, dataWidth: Int, rtype: UInt)
    (implicit p: Parameters): Valid[RegisterFileWritePort] = {
    val wport = Wire(Valid(new RegisterFileWritePort(addrWidth, dataWidth)))
    wport.valid := enq.valid && enq.bits.uop.dst_rtype === rtype
    wport.bits.addr := enq.bits.uop.pdst
    wport.bits.data := enq.bits.data
    enq.ready := true.B
    wport
  }
}
/**
* Register file abstract class
*
* @param numRegisters number of registers
* @param numReadPorts number of read ports
* @param numWritePorts number of write ports
* @param registerWidth size of registers in bits
* @param bypassableArray list of write ports from func units to the read port of the regfile
*/
// Abstract base for BOOM register files: declares the read/write port IO and
// a human-readable summary (toString) including an area-cost heuristic.
abstract class RegisterFile(
numRegisters: Int,
numReadPorts: Int,
numWritePorts: Int,
registerWidth: Int,
bypassableArray: Seq[Boolean]) // which write ports can be bypassed to the read ports?
(implicit p: Parameters) extends BoomModule
{
val io = IO(new BoomBundle {
val read_ports = Vec(numReadPorts, new RegisterFileReadPortIO(maxPregSz, registerWidth))
val write_ports = Flipped(Vec(numWritePorts, Valid(new RegisterFileWritePort(maxPregSz, registerWidth))))
})
// Rough port-dominated area estimate: (R+W)*(R+2W).
private val rf_cost = (numReadPorts + numWritePorts) * (numReadPorts + 2*numWritePorts)
// fLen+1-wide registers indicate the FP regfile (extra recoded bit).
private val type_str = if (registerWidth == fLen+1) "Floating Point" else "Integer"
override def toString: String = BoomCoreStringPrefix(
"==" + type_str + " Regfile==",
"Num RF Read Ports : " + numReadPorts,
"Num RF Write Ports : " + numWritePorts,
"RF Cost (R+W)*(R+2W) : " + rf_cost,
"Bypassable Units : " + bypassableArray)
}
/**
* A synthesizable model of a Register File. You will likely want to blackbox this for more than modest port counts.
*
* @param numRegisters number of registers
* @param numReadPorts number of read ports
* @param numWritePorts number of write ports
* @param registerWidth size of registers in bits
* @param bypassableArray list of write ports from func units to the read port of the regfile
*/
// Synthesizable register file: a Mem with registered read addresses,
// optional write->read bypassing on selected write ports, and an assertion
// that no two write ports target the same (non-zero) register in one cycle.
class RegisterFileSynthesizable(
numRegisters: Int,
numReadPorts: Int,
numWritePorts: Int,
registerWidth: Int,
bypassableArray: Seq[Boolean])
(implicit p: Parameters)
extends RegisterFile(numRegisters, numReadPorts, numWritePorts, registerWidth, bypassableArray)
{
// --------------------------------------------------------------
val regfile = Mem(numRegisters, UInt(registerWidth.W))
// --------------------------------------------------------------
// Read ports.
val read_data = Wire(Vec(numReadPorts, UInt(registerWidth.W)))
// Register the read port addresses to give a full cycle to the RegisterRead Stage (if desired).
val read_addrs = io.read_ports.map(p => RegNext(p.addr))
for (i <- 0 until numReadPorts) {
read_data(i) := regfile(read_addrs(i))
}
// --------------------------------------------------------------
// Bypass out of the ALU's write ports.
// We are assuming we cannot bypass a writer to a reader within the regfile memory
// for a write that occurs at the end of cycle S1 and a read that returns data on cycle S1.
// But since these bypasses are expensive, and not all write ports need to bypass their data,
// only perform the w->r bypass on a select number of write ports.
require (bypassableArray.length == io.write_ports.length)
if (bypassableArray.reduce(_||_)) {
val bypassable_wports = ArrayBuffer[Valid[RegisterFileWritePort]]()
io.write_ports zip bypassableArray map { case (wport, b) => if (b) { bypassable_wports += wport} }
for (i <- 0 until numReadPorts) {
// One-hot select of any bypassable writer hitting this read address.
val bypass_ens = bypassable_wports.map(x => x.valid &&
x.bits.addr === read_addrs(i))
val bypass_data = Mux1H(VecInit(bypass_ens.toSeq), VecInit(bypassable_wports.map(_.bits.data).toSeq))
io.read_ports(i).data := Mux(bypass_ens.reduce(_|_), bypass_data, read_data(i))
}
} else {
// No bypassing configured: reads come straight from the memory array.
for (i <- 0 until numReadPorts) {
io.read_ports(i).data := read_data(i)
}
}
// --------------------------------------------------------------
// Write ports.
for (wport <- io.write_ports) {
when (wport.valid) {
regfile(wport.bits.addr) := wport.bits.data
}
}
// ensure there is only 1 writer per register (unless to preg0)
if (numWritePorts > 1) {
for (i <- 0 until (numWritePorts - 1)) {
for (j <- (i + 1) until numWritePorts) {
assert(!io.write_ports(i).valid ||
!io.write_ports(j).valid ||
(io.write_ports(i).bits.addr =/= io.write_ports(j).bits.addr) ||
(io.write_ports(i).bits.addr === 0.U), // note: you only have to check one here
"[regfile] too many writers a register")
}
}
}
} | module RegisterFileSynthesizable(
// Port list: 3 read ports (registered addresses) and 2 valid-qualified
// write ports; the storage itself lives in the regfile_48x65 macro below.
input clock,
input reset,
input [5:0] io_read_ports_0_addr,
output [64:0] io_read_ports_0_data,
input [5:0] io_read_ports_1_addr,
output [64:0] io_read_ports_1_data,
input [5:0] io_read_ports_2_addr,
output [64:0] io_read_ports_2_data,
input io_write_ports_0_valid,
input [5:0] io_write_ports_0_bits_addr,
input [64:0] io_write_ports_0_bits_data,
input io_write_ports_1_valid,
input [5:0] io_write_ports_1_bits_addr,
input [64:0] io_write_ports_1_bits_data
);
// One-cycle-delayed copies of the read addresses (RegNext in the Chisel source).
reg [5:0] read_addrs_0;
reg [5:0] read_addrs_1;
reg [5:0] read_addrs_2;
always @(posedge clock) begin
read_addrs_0 <= io_read_ports_0_addr;
read_addrs_1 <= io_read_ports_1_addr;
read_addrs_2 <= io_read_ports_2_addr;
end
// 48-entry x 65-bit storage macro; read enables are tied high.
regfile_48x65 regfile_ext (
.R0_addr (read_addrs_2),
.R0_en (1'h1),
.R0_clk (clock),
.R0_data (io_read_ports_2_data),
.R1_addr (read_addrs_1),
.R1_en (1'h1),
.R1_clk (clock),
.R1_data (io_read_ports_1_data),
.R2_addr (read_addrs_0),
.R2_en (1'h1),
.R2_clk (clock),
.R2_data (io_read_ports_0_data),
.W0_addr (io_write_ports_1_bits_addr),
.W0_en (io_write_ports_1_valid),
.W0_clk (clock),
.W0_data (io_write_ports_1_bits_data),
.W1_addr (io_write_ports_0_bits_addr),
.W1_en (io_write_ports_0_valid),
.W1_clk (clock),
.W1_data (io_write_ports_0_bits_data)
);
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
// Factory for a named, annotated SyncReadMem: builds the memory, attaches the
// suggested name, and records SRAM metadata (address/data width, depth,
// description, write-mask granularity) via Annotated.srams.
object DescribedSRAM {
def apply[T <: Data](
name: String,
desc: String,
size: BigInt, // depth
data: T
): SyncReadMem[T] = {
val mem = SyncReadMem(size, data)
mem.suggestName(name)
// Mask granularity: per-element width for Vec data, else the whole word.
val granWidth = data match {
case v: Vec[_] => v.head.getWidth
case d => d.getWidth
}
val uid = 0 // NOTE(review): unused; kept as-is
Annotated.srams(
component = mem,
name = name,
address_width = log2Ceil(size),
data_width = data.getWidth,
depth = size,
description = desc,
write_mask_granularity = granWidth
)
mem
}
} | module rockettile_dcache_data_arrays_0(
// Single read/write port (RW0) with a 64-lane write mask over 512-bit data;
// all signals pass straight through to the generated _ext macro.
input [8:0] RW0_addr,
input RW0_en,
input RW0_clk,
input RW0_wmode,
input [511:0] RW0_wdata,
output [511:0] RW0_rdata,
input [63:0] RW0_wmask
);
rockettile_dcache_data_arrays_0_ext rockettile_dcache_data_arrays_0_ext (
.RW0_addr (RW0_addr),
.RW0_en (RW0_en),
.RW0_clk (RW0_clk),
.RW0_wmode (RW0_wmode),
.RW0_wdata (RW0_wdata),
.RW0_rdata (RW0_rdata),
.RW0_wmask (RW0_wmask)
);
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
/** Implements the same interface as chisel3.util.Queue, but uses a shift
* register internally. It is less energy efficient whenever the queue
* has more than one entry populated, but is faster on the dequeue side.
* It is efficient for usually-empty flow-through queues. */
// Shift-register queue: entry 0 is always the head, so dequeue is fast; on
// dequeue every valid entry shifts down one slot. New entries land in the
// lowest empty slot.
class ShiftQueue[T <: Data](gen: T,
val entries: Int,
pipe: Boolean = false,
flow: Boolean = false)
extends Module {
val io = IO(new QueueIO(gen, entries) {
val mask = Output(UInt(entries.W)) // per-slot occupancy bits
})
private val valid = RegInit(VecInit(Seq.fill(entries) { false.B }))
private val elts = Reg(Vec(entries, gen))
for (i <- 0 until entries) {
// Virtual valid bits just outside the array: slot -1 is always full,
// slot `entries` is always empty.
def paddedValid(i: Int) = if (i == -1) true.B else if (i == entries) false.B else valid(i)
// Shift-down data from slot i+1 when occupied, else capture the enq data.
val wdata = if (i == entries-1) io.enq.bits else Mux(valid(i+1), elts(i+1), io.enq.bits)
val wen =
Mux(io.deq.ready,
paddedValid(i+1) || io.enq.fire && ((i == 0 && !flow).B || valid(i)),
io.enq.fire && paddedValid(i-1) && !valid(i))
when (wen) { elts(i) := wdata }
valid(i) :=
Mux(io.deq.ready,
paddedValid(i+1) || io.enq.fire && ((i == 0 && !flow).B || valid(i)),
io.enq.fire && paddedValid(i-1) || valid(i))
}
io.enq.ready := !valid(entries-1) // full only when the top slot is occupied
io.deq.valid := valid(0)
io.deq.bits := elts.head
if (flow) {
// Combinationally pass an enqueue through when the queue is empty.
when (io.enq.valid) { io.deq.valid := true.B }
when (!valid(0)) { io.deq.bits := io.enq.bits }
}
if (pipe) {
// Accept an enqueue even when full if a dequeue frees a slot this cycle.
when (io.deq.ready) { io.enq.ready := true.B }
}
io.mask := valid.asUInt
io.count := PopCount(io.mask)
}
// Convenience constructor mirroring chisel3.util.Queue.apply: wraps `enq`
// in a ShiftQueue and returns the dequeue side.
object ShiftQueue
{
def apply[T <: Data](enq: DecoupledIO[T], entries: Int = 2, pipe: Boolean = false, flow: Boolean = false): DecoupledIO[T] = {
val q = Module(new ShiftQueue(enq.bits.cloneType, entries, pipe, flow))
q.io.enq <> enq
q.io.deq
}
} | module ShiftQueue(
// 5-entry flow-through ShiftQueue (flow=true, pipe=false) elaborated for the
// frontend fetch bundle. Slot 0 is the dequeue head; on deq.ready all slots
// shift down; the enqueue lands in the lowest empty slot.
input clock,
input reset,
output io_enq_ready,
input io_enq_valid,
input io_enq_bits_btb_taken,
input io_enq_bits_btb_bridx,
input [4:0] io_enq_bits_btb_entry,
input [7:0] io_enq_bits_btb_bht_history,
input [39:0] io_enq_bits_pc,
input [31:0] io_enq_bits_data,
input io_enq_bits_xcpt_pf_inst,
input io_enq_bits_xcpt_ae_inst,
input io_enq_bits_replay,
input io_deq_ready,
output io_deq_valid,
output io_deq_bits_btb_taken,
output io_deq_bits_btb_bridx,
output [4:0] io_deq_bits_btb_entry,
output [7:0] io_deq_bits_btb_bht_history,
output [39:0] io_deq_bits_pc,
output [31:0] io_deq_bits_data,
output io_deq_bits_xcpt_pf_inst,
output io_deq_bits_xcpt_gf_inst,
output io_deq_bits_xcpt_ae_inst,
output io_deq_bits_replay,
output [4:0] io_mask
);
// Per-slot occupancy bits.
reg valid_0;
reg valid_1;
reg valid_2;
reg valid_3;
reg valid_4;
// Per-slot payload registers (flattened fetch-bundle fields).
reg elts_0_btb_taken;
reg elts_0_btb_bridx;
reg [4:0] elts_0_btb_entry;
reg [7:0] elts_0_btb_bht_history;
reg [39:0] elts_0_pc;
reg [31:0] elts_0_data;
reg elts_0_xcpt_pf_inst;
reg elts_0_xcpt_gf_inst;
reg elts_0_xcpt_ae_inst;
reg elts_0_replay;
reg elts_1_btb_taken;
reg elts_1_btb_bridx;
reg [4:0] elts_1_btb_entry;
reg [7:0] elts_1_btb_bht_history;
reg [39:0] elts_1_pc;
reg [31:0] elts_1_data;
reg elts_1_xcpt_pf_inst;
reg elts_1_xcpt_gf_inst;
reg elts_1_xcpt_ae_inst;
reg elts_1_replay;
reg elts_2_btb_taken;
reg elts_2_btb_bridx;
reg [4:0] elts_2_btb_entry;
reg [7:0] elts_2_btb_bht_history;
reg [39:0] elts_2_pc;
reg [31:0] elts_2_data;
reg elts_2_xcpt_pf_inst;
reg elts_2_xcpt_gf_inst;
reg elts_2_xcpt_ae_inst;
reg elts_2_replay;
reg elts_3_btb_taken;
reg elts_3_btb_bridx;
reg [4:0] elts_3_btb_entry;
reg [7:0] elts_3_btb_bht_history;
reg [39:0] elts_3_pc;
reg [31:0] elts_3_data;
reg elts_3_xcpt_pf_inst;
reg elts_3_xcpt_gf_inst;
reg elts_3_xcpt_ae_inst;
reg elts_3_replay;
reg elts_4_btb_taken;
reg elts_4_btb_bridx;
reg [4:0] elts_4_btb_entry;
reg [7:0] elts_4_btb_bht_history;
reg [39:0] elts_4_pc;
reg [31:0] elts_4_data;
reg elts_4_xcpt_pf_inst;
reg elts_4_xcpt_gf_inst;
reg elts_4_xcpt_ae_inst;
reg elts_4_replay;
// Enqueue fire: queue not full (top slot empty) and enq valid.
wire _valid_4_T_4 = ~valid_4 & io_enq_valid;
wire wen_4 = io_deq_ready ? _valid_4_T_4 & valid_4 : _valid_4_T_4 & valid_3;
always @(posedge clock) begin
if (reset) begin
valid_0 <= 1'h0;
valid_1 <= 1'h0;
valid_2 <= 1'h0;
valid_3 <= 1'h0;
valid_4 <= 1'h0;
end
else begin
// With deq.ready: shift occupancy down; otherwise extend it upward on enq.
valid_0 <= io_deq_ready ? valid_1 | _valid_4_T_4 & valid_0 : _valid_4_T_4 | valid_0;
valid_1 <= io_deq_ready ? valid_2 | _valid_4_T_4 & valid_1 : _valid_4_T_4 & valid_0 | valid_1;
valid_2 <= io_deq_ready ? valid_3 | _valid_4_T_4 & valid_2 : _valid_4_T_4 & valid_1 | valid_2;
valid_3 <= io_deq_ready ? valid_4 | _valid_4_T_4 & valid_3 : _valid_4_T_4 & valid_2 | valid_3;
valid_4 <= io_deq_ready ? _valid_4_T_4 & valid_4 : _valid_4_T_4 & valid_3 | valid_4;
end
// Slot 0: take slot 1's payload when it is valid, else the enqueue data.
if (io_deq_ready ? valid_1 | _valid_4_T_4 & valid_0 : _valid_4_T_4 & ~valid_0) begin
elts_0_btb_taken <= valid_1 ? elts_1_btb_taken : io_enq_bits_btb_taken;
elts_0_btb_bridx <= valid_1 ? elts_1_btb_bridx : io_enq_bits_btb_bridx;
elts_0_btb_entry <= valid_1 ? elts_1_btb_entry : io_enq_bits_btb_entry;
elts_0_btb_bht_history <= valid_1 ? elts_1_btb_bht_history : io_enq_bits_btb_bht_history;
elts_0_pc <= valid_1 ? elts_1_pc : io_enq_bits_pc;
elts_0_data <= valid_1 ? elts_1_data : io_enq_bits_data;
elts_0_xcpt_pf_inst <= valid_1 ? elts_1_xcpt_pf_inst : io_enq_bits_xcpt_pf_inst;
elts_0_xcpt_gf_inst <= valid_1 & elts_1_xcpt_gf_inst;
elts_0_xcpt_ae_inst <= valid_1 ? elts_1_xcpt_ae_inst : io_enq_bits_xcpt_ae_inst;
elts_0_replay <= valid_1 ? elts_1_replay : io_enq_bits_replay;
end
if (io_deq_ready ? valid_2 | _valid_4_T_4 & valid_1 : _valid_4_T_4 & valid_0 & ~valid_1) begin
elts_1_btb_taken <= valid_2 ? elts_2_btb_taken : io_enq_bits_btb_taken;
elts_1_btb_bridx <= valid_2 ? elts_2_btb_bridx : io_enq_bits_btb_bridx;
elts_1_btb_entry <= valid_2 ? elts_2_btb_entry : io_enq_bits_btb_entry;
elts_1_btb_bht_history <= valid_2 ? elts_2_btb_bht_history : io_enq_bits_btb_bht_history;
elts_1_pc <= valid_2 ? elts_2_pc : io_enq_bits_pc;
elts_1_data <= valid_2 ? elts_2_data : io_enq_bits_data;
elts_1_xcpt_pf_inst <= valid_2 ? elts_2_xcpt_pf_inst : io_enq_bits_xcpt_pf_inst;
elts_1_xcpt_gf_inst <= valid_2 & elts_2_xcpt_gf_inst;
elts_1_xcpt_ae_inst <= valid_2 ? elts_2_xcpt_ae_inst : io_enq_bits_xcpt_ae_inst;
elts_1_replay <= valid_2 ? elts_2_replay : io_enq_bits_replay;
end
if (io_deq_ready ? valid_3 | _valid_4_T_4 & valid_2 : _valid_4_T_4 & valid_1 & ~valid_2) begin
elts_2_btb_taken <= valid_3 ? elts_3_btb_taken : io_enq_bits_btb_taken;
elts_2_btb_bridx <= valid_3 ? elts_3_btb_bridx : io_enq_bits_btb_bridx;
elts_2_btb_entry <= valid_3 ? elts_3_btb_entry : io_enq_bits_btb_entry;
elts_2_btb_bht_history <= valid_3 ? elts_3_btb_bht_history : io_enq_bits_btb_bht_history;
elts_2_pc <= valid_3 ? elts_3_pc : io_enq_bits_pc;
elts_2_data <= valid_3 ? elts_3_data : io_enq_bits_data;
elts_2_xcpt_pf_inst <= valid_3 ? elts_3_xcpt_pf_inst : io_enq_bits_xcpt_pf_inst;
elts_2_xcpt_gf_inst <= valid_3 & elts_3_xcpt_gf_inst;
elts_2_xcpt_ae_inst <= valid_3 ? elts_3_xcpt_ae_inst : io_enq_bits_xcpt_ae_inst;
elts_2_replay <= valid_3 ? elts_3_replay : io_enq_bits_replay;
end
if (io_deq_ready ? valid_4 | _valid_4_T_4 & valid_3 : _valid_4_T_4 & valid_2 & ~valid_3) begin
elts_3_btb_taken <= valid_4 ? elts_4_btb_taken : io_enq_bits_btb_taken;
elts_3_btb_bridx <= valid_4 ? elts_4_btb_bridx : io_enq_bits_btb_bridx;
elts_3_btb_entry <= valid_4 ? elts_4_btb_entry : io_enq_bits_btb_entry;
elts_3_btb_bht_history <= valid_4 ? elts_4_btb_bht_history : io_enq_bits_btb_bht_history;
elts_3_pc <= valid_4 ? elts_4_pc : io_enq_bits_pc;
elts_3_data <= valid_4 ? elts_4_data : io_enq_bits_data;
elts_3_xcpt_pf_inst <= valid_4 ? elts_4_xcpt_pf_inst : io_enq_bits_xcpt_pf_inst;
elts_3_xcpt_gf_inst <= valid_4 & elts_4_xcpt_gf_inst;
elts_3_xcpt_ae_inst <= valid_4 ? elts_4_xcpt_ae_inst : io_enq_bits_xcpt_ae_inst;
elts_3_replay <= valid_4 ? elts_4_replay : io_enq_bits_replay;
end
// Top slot only ever captures fresh enqueue data.
if (wen_4) begin
elts_4_btb_taken <= io_enq_bits_btb_taken;
elts_4_btb_bridx <= io_enq_bits_btb_bridx;
elts_4_btb_entry <= io_enq_bits_btb_entry;
elts_4_btb_bht_history <= io_enq_bits_btb_bht_history;
elts_4_pc <= io_enq_bits_pc;
elts_4_data <= io_enq_bits_data;
elts_4_xcpt_pf_inst <= io_enq_bits_xcpt_pf_inst;
elts_4_xcpt_ae_inst <= io_enq_bits_xcpt_ae_inst;
elts_4_replay <= io_enq_bits_replay;
end
// gf_inst has no enqueue-side source in this configuration; it can only clear.
elts_4_xcpt_gf_inst <= ~wen_4 & elts_4_xcpt_gf_inst;
end
assign io_enq_ready = ~valid_4;
// flow=true: an enqueue is visible on the dequeue side in the same cycle.
assign io_deq_valid = io_enq_valid | valid_0;
assign io_deq_bits_btb_taken = valid_0 ? elts_0_btb_taken : io_enq_bits_btb_taken;
assign io_deq_bits_btb_bridx = valid_0 ? elts_0_btb_bridx : io_enq_bits_btb_bridx;
assign io_deq_bits_btb_entry = valid_0 ? elts_0_btb_entry : io_enq_bits_btb_entry;
assign io_deq_bits_btb_bht_history = valid_0 ? elts_0_btb_bht_history : io_enq_bits_btb_bht_history;
assign io_deq_bits_pc = valid_0 ? elts_0_pc : io_enq_bits_pc;
assign io_deq_bits_data = valid_0 ? elts_0_data : io_enq_bits_data;
assign io_deq_bits_xcpt_pf_inst = valid_0 ? elts_0_xcpt_pf_inst : io_enq_bits_xcpt_pf_inst;
assign io_deq_bits_xcpt_gf_inst = valid_0 & elts_0_xcpt_gf_inst;
assign io_deq_bits_xcpt_ae_inst = valid_0 ? elts_0_xcpt_ae_inst : io_enq_bits_xcpt_ae_inst;
assign io_deq_bits_replay = valid_0 ? elts_0_replay : io_enq_bits_replay;
assign io_mask = {valid_4, valid_3, valid_2, valid_1, valid_0};
endmodule |
Generate the Verilog code corresponding to this Chisel code package testchipip.serdes
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
// Helpers for splitting TileLink channel fields into per-beat "body" fields
// (mask/data/corrupt, which change beat to beat) and "const" fields
// (opcode/address/size/..., constant across a burst), plus the minimum
// payload width needed to carry either set.
trait TLFieldHelper {
def getBodyFields(b: TLChannel): Seq[Data] = b match {
case b: TLBundleA => Seq(b.mask, b.data, b.corrupt)
case b: TLBundleB => Seq(b.mask, b.data, b.corrupt)
case b: TLBundleC => Seq( b.data, b.corrupt)
case b: TLBundleD => Seq( b.data, b.corrupt)
case b: TLBundleE => Seq()
}
def getConstFields(b: TLChannel): Seq[Data] = b match {
case b: TLBundleA => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo )
case b: TLBundleB => Seq(b.opcode, b.param, b.size, b.source, b.address )
case b: TLBundleC => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo )
case b: TLBundleD => Seq(b.opcode, b.param, b.size, b.source, b.user, b.echo, b.sink, b.denied)
case b: TLBundleE => Seq( b.sink )
}
// Beat must be wide enough for whichever field set is larger.
def minTLPayloadWidth(b: TLChannel): Int = Seq(getBodyFields(b), getConstFields(b)).map(_.map(_.getWidth).sum).max
def minTLPayloadWidth(bs: Seq[TLChannel]): Int = bs.map(b => minTLPayloadWidth(b)).max
def minTLPayloadWidth(b: TLBundle): Int = minTLPayloadWidth(Seq(b.a, b.b, b.c, b.d, b.e).map(_.bits))
}
// One serialized beat: an opaque payload plus head/tail markers delimiting a
// TileLink message.
class TLBeat(val beatWidth: Int) extends Bundle {
val payload = UInt(beatWidth.W)
val head = Bool() // first beat of a message (payload carries const fields)
val tail = Bool() // last beat of a message
}
// Serializes one TileLink channel into a beat stream: first a head beat with
// the constant fields, then (when the message has a body) body beats with
// mask/data/corrupt. `has_body` is driven by concrete subclasses.
abstract class TLChannelToBeat[T <: TLChannel](gen: => T, edge: TLEdge, nameSuffix: Option[String])(implicit val p: Parameters) extends Module with TLFieldHelper {
override def desiredName = (Seq(this.getClass.getSimpleName) ++ nameSuffix ++ Seq(gen.params.shortName)).mkString("_")
val beatWidth = minTLPayloadWidth(gen)
val io = IO(new Bundle {
val protocol = Flipped(Decoupled(gen))
val beat = Decoupled(new TLBeat(beatWidth))
})
def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
// convert decoupled to irrevocable
val q = Module(new Queue(gen, 1, pipe=true, flow=true))
q.io.enq <> io.protocol
val protocol = q.io.deq
val has_body = Wire(Bool())
val body_fields = getBodyFields(protocol.bits)
val const_fields = getConstFields(protocol.bits)
val head = edge.first(protocol.bits, protocol.fire)
val tail = edge.last(protocol.bits, protocol.fire)
val body = Cat( body_fields.filter(_.getWidth > 0).map(_.asUInt))
val const = Cat(const_fields.filter(_.getWidth > 0).map(_.asUInt))
// Tracks whether we are past the head beat of the current message.
val is_body = RegInit(false.B)
io.beat.valid := protocol.valid
// Only consume the protocol beat once its body (if any) has been sent.
protocol.ready := io.beat.ready && (is_body || !has_body)
io.beat.bits.head := head && !is_body
io.beat.bits.tail := tail && (is_body || !has_body)
io.beat.bits.payload := Mux(is_body, body, const)
when (io.beat.fire && io.beat.bits.head) { is_body := true.B }
when (io.beat.fire && io.beat.bits.tail) { is_body := false.B }
}
// Deserializes a beat stream back into a TileLink channel: the head beat's
// payload is latched as the constant fields, subsequent beats supply the
// body fields; a protocol beat is emitted on each tail (or body) beat.
abstract class TLChannelFromBeat[T <: TLChannel](gen: => T, nameSuffix: Option[String])(implicit val p: Parameters) extends Module with TLFieldHelper {
override def desiredName = (Seq(this.getClass.getSimpleName) ++ nameSuffix ++ Seq(gen.params.shortName)).mkString("_")
val beatWidth = minTLPayloadWidth(gen)
val io = IO(new Bundle {
val protocol = Decoupled(gen)
val beat = Flipped(Decoupled(new TLBeat(beatWidth)))
})
// Handle size = 1 gracefully (Chisel3 empty range is broken)
def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)
val protocol = Wire(Decoupled(gen))
io.protocol <> protocol
val body_fields = getBodyFields(protocol.bits)
val const_fields = getConstFields(protocol.bits)
// True while waiting for a head beat (no message in flight).
val is_const = RegInit(true.B)
val const_reg = Reg(UInt(const_fields.map(_.getWidth).sum.W))
// Use the live head payload before it is registered.
val const = Mux(io.beat.bits.head, io.beat.bits.payload, const_reg)
io.beat.ready := (is_const && !io.beat.bits.tail) || protocol.ready
protocol.valid := (!is_const || io.beat.bits.tail) && io.beat.valid
// Unpack a flat UInt into the given fields, last field in the low bits.
def assign(i: UInt, sigs: Seq[Data]) = {
var t = i
for (s <- sigs.reverse) {
s := t.asTypeOf(s.cloneType)
t = t >> s.getWidth
}
}
assign(const, const_fields)
assign(io.beat.bits.payload, body_fields)
when (io.beat.fire && io.beat.bits.head) { is_const := false.B; const_reg := io.beat.bits.payload }
when (io.beat.fire && io.beat.bits.tail) { is_const := true.B }
}
// Concrete per-channel serializers/deserializers. A and B have a message body
// when they carry data OR a non-trivial (not all-ones) byte mask; C, D and E
// only when the edge says they carry data. The FromBeat A/B variants restore
// a full mask on head beats (mask is only serialized when non-trivial).
class TLAToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleA(bundle), edgeIn, nameSuffix)(p) {
has_body := edgeIn.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U)
}
class TLAFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleA(bundle), nameSuffix)(p) {
when (io.beat.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) }
}
class TLBToBeat(edgeOut: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleB(bundle), edgeOut, nameSuffix)(p) {
has_body := edgeOut.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U)
}
class TLBFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleB(bundle), nameSuffix)(p) {
when (io.beat.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) }
}
class TLCToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleC(bundle), edgeIn, nameSuffix)(p) {
has_body := edgeIn.hasData(protocol.bits)
}
class TLCFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleC(bundle), nameSuffix)(p)
class TLDToBeat(edgeOut: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleD(bundle), edgeOut, nameSuffix)(p) {
has_body := edgeOut.hasData(protocol.bits)
}
class TLDFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleD(bundle), nameSuffix)(p)
class TLEToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleE(bundle), edgeIn, nameSuffix)(p) {
has_body := edgeIn.hasData(protocol.bits)
}
class TLEFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleE(bundle), nameSuffix)(p) | module TLCFromBeat_SerialRAM_a64d64s8k8z8c(
// Degenerate elaboration: only the head/tail protocol tracking survives;
// payload and protocol outputs were optimized away.
input clock,
input reset,
output io_beat_ready,
input io_beat_valid,
input io_beat_bits_head,
input io_beat_bits_tail
);
// True while idle (waiting for a head beat).
reg is_const;
wire io_beat_ready_0 = is_const & ~io_beat_bits_tail;
wire _GEN = io_beat_ready_0 & io_beat_valid; // beat fire
always @(posedge clock) begin
if (reset)
is_const <= 1'h1;
else
// Set on tail, clear on head, otherwise hold.
is_const <= _GEN & io_beat_bits_tail | ~(_GEN & io_beat_bits_head) & is_const;
end
assign io_beat_ready = io_beat_ready_0;
endmodule |
Generate the Verilog code corresponding to this Chisel code //******************************************************************************
// Copyright (c) 2013 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Functional Units
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// If regfile bypassing is disabled, then the functional unit must do its own
// bypassing in here on the WB stage (i.e., bypassing the io.resp.data)
//
// TODO: explore possibility of conditional IO fields? if a branch unit... how to add extra to IO in subclass?
package boom.v3.exu
import chisel3._
import chisel3.util._
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
import freechips.rocketchip.tile
import freechips.rocketchip.rocket.{PipelinedMultiplier,BP,BreakpointUnit,Causes,CSR}
import boom.v3.common._
import boom.v3.ifu._
import boom.v3.util._
/**
* Functional unit constants
*/
// Functional-unit capability encodings: one-hot bits so an execution pipeline
// can advertise several units at once (FU_F2IMEM = FU_F2I | FU_MEM).
object FUConstants
{
// bit mask, since a given execution pipeline may support multiple functional units
val FUC_SZ = 10
val FU_X = BitPat.dontCare(FUC_SZ)
val FU_ALU = 1.U(FUC_SZ.W)
val FU_JMP = 2.U(FUC_SZ.W)
val FU_MEM = 4.U(FUC_SZ.W)
val FU_MUL = 8.U(FUC_SZ.W)
val FU_DIV = 16.U(FUC_SZ.W)
val FU_CSR = 32.U(FUC_SZ.W)
val FU_FPU = 64.U(FUC_SZ.W)
val FU_FDV = 128.U(FUC_SZ.W)
val FU_I2F = 256.U(FUC_SZ.W)
val FU_F2I = 512.U(FUC_SZ.W)
// FP stores generate data through FP F2I, and generate address through MemAddrCalc
val FU_F2IMEM = 516.U(FUC_SZ.W)
}
import FUConstants._
/**
* Class to tell the FUDecoders what units it needs to support
*
* @param alu support alu unit?
* @param bru support br unit?
* @param mem support mem unit?
* @param muld support multiple div unit?
* @param fpu support FP unit?
* @param csr support csr writing unit?
* @param fdiv support FP div unit?
* @param ifpu support int to FP unit?
*/
// Plain configuration flags describing which functional units a decoder must
// support; carries no hardware state.
class SupportedFuncUnits(
val alu: Boolean = false,
val jmp: Boolean = false,
val mem: Boolean = false,
val muld: Boolean = false,
val fpu: Boolean = false,
val csr: Boolean = false,
val fdiv: Boolean = false,
val ifpu: Boolean = false)
{
}
/**
* Bundle for signals sent to the functional unit
*
* @param dataWidth width of the data sent to the functional unit
*/
// Request into a functional unit: up to three source operands, the predicate
// bit, and a kill to flush in-flight work.
class FuncUnitReq(val dataWidth: Int)(implicit p: Parameters) extends BoomBundle
with HasBoomUOP
{
val numOperands = 3
val rs1_data = UInt(dataWidth.W)
val rs2_data = UInt(dataWidth.W)
val rs3_data = UInt(dataWidth.W) // only used for FMA units
val pred_data = Bool()
val kill = Bool() // kill everything
}
/**
* Bundle for the signals sent out of the function unit
*
* @param dataWidth data sent from the functional unit
*/
// Response out of a functional unit: result data plus FP flags, and the
// memory-address-calc side channels (addr/mxcpt/sfence) consumed by the LSU.
class FuncUnitResp(val dataWidth: Int)(implicit p: Parameters) extends BoomBundle
with HasBoomUOP
{
val predicated = Bool() // Was this response from a predicated-off instruction
val data = UInt(dataWidth.W)
val fflags = new ValidIO(new FFlagsResp)
val addr = UInt((vaddrBits+1).W) // only for maddr -> LSU
val mxcpt = new ValidIO(UInt((freechips.rocketchip.rocket.Causes.all.max+2).W)) //only for maddr->LSU
val sfence = Valid(new freechips.rocketchip.rocket.SFenceReq) // only for mcalc
}
/**
* Branch resolution information given from the branch unit
*/
// Branch-resolution result broadcast by the branch unit, including what is
// needed to recompute the redirect target.
class BrResolutionInfo(implicit p: Parameters) extends BoomBundle
{
val uop = new MicroOp
val valid = Bool()
val mispredict = Bool()
val taken = Bool() // which direction did the branch go?
val cfi_type = UInt(CFI_SZ.W)
// Info for recalculating the pc for this branch
val pc_sel = UInt(2.W)
val jalr_target = UInt(vaddrBitsExtended.W)
val target_offset = SInt()
}
// Two-cycle branch-update bundle: cycle 1 carries kill masks, cycle 2 the
// full resolution info for pointer resets.
class BrUpdateInfo(implicit p: Parameters) extends BoomBundle
{
// On the first cycle we get masks to kill registers
val b1 = new BrUpdateMasks
// On the second cycle we get indices to reset pointers
val b2 = new BrResolutionInfo
}
// Per-branch-tag masks: which in-flight branches resolved this cycle, and
// which of those were mispredicted.
class BrUpdateMasks(implicit p: Parameters) extends BoomBundle
{
val resolve_mask = UInt(maxBrCount.W)
val mispredict_mask = UInt(maxBrCount.W)
}
/**
* Abstract top level functional unit class that wraps a lower level hand made functional unit
*
* @param isPipelined is the functional unit pipelined?
* @param numStages how many pipeline stages does the functional unit have
* @param numBypassStages how many bypass stages does the function unit have
* @param dataWidth width of the data being operated on in the functional unit
* @param hasBranchUnit does this functional unit have a branch unit?
*/
// Abstract functional unit: common req/resp/branch-update/bypass IO, with
// optional sub-interfaces (fcsr, brinfo, ftq PC, mstatus, breakpoints) gated
// by the constructor flags; unused ones are null. Defaults drive resp and
// bypass invalid so subclasses only override what they use.
abstract class FunctionalUnit(
val isPipelined: Boolean,
val numStages: Int,
val numBypassStages: Int,
val dataWidth: Int,
val isJmpUnit: Boolean = false,
val isAluUnit: Boolean = false,
val isMemAddrCalcUnit: Boolean = false,
val needsFcsr: Boolean = false)
(implicit p: Parameters) extends BoomModule
{
val io = IO(new Bundle {
val req = Flipped(new DecoupledIO(new FuncUnitReq(dataWidth)))
val resp = (new DecoupledIO(new FuncUnitResp(dataWidth)))
val brupdate = Input(new BrUpdateInfo())
val bypass = Output(Vec(numBypassStages, Valid(new ExeUnitResp(dataWidth))))
// only used by the fpu unit
val fcsr_rm = if (needsFcsr) Input(UInt(tile.FPConstants.RM_SZ.W)) else null
// only used by branch unit
val brinfo = if (isAluUnit) Output(new BrResolutionInfo()) else null
val get_ftq_pc = if (isJmpUnit) Flipped(new GetPCFromFtqIO()) else null
val status = if (isMemAddrCalcUnit) Input(new freechips.rocketchip.rocket.MStatus()) else null
// only used by memaddr calc unit
val bp = if (isMemAddrCalcUnit) Input(Vec(nBreakpoints, new BP)) else null
val mcontext = if (isMemAddrCalcUnit) Input(UInt(coreParams.mcontextWidth.W)) else null
val scontext = if (isMemAddrCalcUnit) Input(UInt(coreParams.scontextWidth.W)) else null
})
// Safe defaults; concrete units drive real values.
io.bypass.foreach { b => b.valid := false.B; b.bits := DontCare }
io.resp.valid := false.B
io.resp.bits := DontCare
if (isJmpUnit) {
io.get_ftq_pc.ftq_idx := DontCare
}
}
/**
* Abstract top level pipelined functional unit
*
* Note: this helps track which uops get killed while in intermediate stages,
* but it is the job of the consumer to check for kills on the same cycle as consumption!!!
*
* @param numStages how many pipeline stages does the functional unit have
* @param numBypassStages how many bypass stages does the function unit have
* @param earliestBypassStage first stage that you can start bypassing from
* @param dataWidth width of the data being operated on in the functional unit
* @param hasBranchUnit does this functional unit have a branch unit?
*/
// Pipelined functional-unit base: carries valids and uops through numStages
// registers, killing entries whose branch mask matches a mispredict, and
// wires up the bypass network. numStages == 0 is pure pass-through.
abstract class PipelinedFunctionalUnit(
numStages: Int,
numBypassStages: Int,
earliestBypassStage: Int,
dataWidth: Int,
isJmpUnit: Boolean = false,
isAluUnit: Boolean = false,
isMemAddrCalcUnit: Boolean = false,
needsFcsr: Boolean = false
)(implicit p: Parameters) extends FunctionalUnit(
isPipelined = true,
numStages = numStages,
numBypassStages = numBypassStages,
dataWidth = dataWidth,
isJmpUnit = isJmpUnit,
isAluUnit = isAluUnit,
isMemAddrCalcUnit = isMemAddrCalcUnit,
needsFcsr = needsFcsr)
{
// Pipelined functional unit is always ready.
io.req.ready := true.B
if (numStages > 0) {
val r_valids = RegInit(VecInit(Seq.fill(numStages) { false.B }))
val r_uops = Reg(Vec(numStages, new MicroOp()))
// handle incoming request
r_valids(0) := io.req.valid && !IsKilledByBranch(io.brupdate, io.req.bits.uop) && !io.req.bits.kill
r_uops(0) := io.req.bits.uop
r_uops(0).br_mask := GetNewBrMask(io.brupdate, io.req.bits.uop)
// handle middle of the pipeline
for (i <- 1 until numStages) {
r_valids(i) := r_valids(i-1) && !IsKilledByBranch(io.brupdate, r_uops(i-1)) && !io.req.bits.kill
r_uops(i) := r_uops(i-1)
r_uops(i).br_mask := GetNewBrMask(io.brupdate, r_uops(i-1))
if (numBypassStages > 0) {
io.bypass(i-1).bits.uop := r_uops(i-1)
}
}
// handle outgoing (branch could still kill it)
// consumer must also check for pipeline flushes (kills)
io.resp.valid := r_valids(numStages-1) && !IsKilledByBranch(io.brupdate, r_uops(numStages-1))
io.resp.bits.predicated := false.B
io.resp.bits.uop := r_uops(numStages-1)
io.resp.bits.uop.br_mask := GetNewBrMask(io.brupdate, r_uops(numStages-1))
// bypassing (TODO allow bypass vector to have a different size from numStages)
if (numBypassStages > 0 && earliestBypassStage == 0) {
io.bypass(0).bits.uop := io.req.bits.uop
for (i <- 1 until numBypassStages) {
io.bypass(i).bits.uop := r_uops(i-1)
}
}
} else {
require (numStages == 0)
// pass req straight through to response
// valid doesn't check kill signals, let consumer deal with it.
// The LSU already handles it and this hurts critical path.
io.resp.valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.req.bits.uop)
io.resp.bits.predicated := false.B
io.resp.bits.uop := io.req.bits.uop
io.resp.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.req.bits.uop)
}
}
/**
* Functional unit that wraps RocketChips ALU
*
* @param isJmpUnit is this the jump unit (resolves JALR targets via the FTQ)?
* @param numStages how many pipeline stages does the functional unit have
* @param dataWidth width of the data being operated on in the functional unit
*/
class ALUUnit(isJmpUnit: Boolean = false, numStages: Int = 1, dataWidth: Int)(implicit p: Parameters)
  extends PipelinedFunctionalUnit(
    numStages = numStages,
    numBypassStages = numStages,
    isAluUnit = true,
    earliestBypassStage = 0,
    dataWidth = dataWidth,
    isJmpUnit = isJmpUnit)
  with boom.v3.ifu.HasBoomFrontendParameters
{
  val uop = io.req.bits.uop

  // immediate generation
  val imm_xprlen = ImmGen(uop.imm_packed, uop.ctrl.imm_sel)

  // operand 1 select: rs1, the uop's PC (jump unit only), or zero
  var op1_data: UInt = null
  if (isJmpUnit) {
    // Get the uop PC for jumps: reconstruct it from the fetch-block PC and the
    // low-order PC bits, backing up 2 bytes for instructions spanning blocks.
    val block_pc = AlignPCToBoundary(io.get_ftq_pc.pc, icBlockBytes)
    val uop_pc = (block_pc | uop.pc_lob) - Mux(uop.edge_inst, 2.U, 0.U)

    op1_data = Mux(uop.ctrl.op1_sel.asUInt === OP1_RS1 , io.req.bits.rs1_data,
               Mux(uop.ctrl.op1_sel.asUInt === OP1_PC  , Sext(uop_pc, xLen),
                                                         0.U))
  } else {
    op1_data = Mux(uop.ctrl.op1_sel.asUInt === OP1_RS1 , io.req.bits.rs1_data,
                                                         0.U)
  }

  // operand 2 select: immediate, CSR shamt (prs1 low bits), rs2, or next-PC increment
  val op2_data = Mux(uop.ctrl.op2_sel === OP2_IMM,  Sext(imm_xprlen.asUInt, xLen),
                 Mux(uop.ctrl.op2_sel === OP2_IMMC, io.req.bits.uop.prs1(4,0),
                 Mux(uop.ctrl.op2_sel === OP2_RS2 , io.req.bits.rs2_data,
                 Mux(uop.ctrl.op2_sel === OP2_NEXT, Mux(uop.is_rvc, 2.U, 4.U),
                                                    0.U))))

  val alu = Module(new freechips.rocketchip.rocket.ALU())

  alu.io.in1 := op1_data.asUInt
  alu.io.in2 := op2_data.asUInt
  alu.io.fn  := uop.ctrl.op_fcn
  alu.io.dw  := uop.ctrl.fcn_dw

  // Did I just get killed by the previous cycle's branch,
  // or by a flush pipeline?
  val killed = WireInit(false.B)
  when (io.req.bits.kill || IsKilledByBranch(io.brupdate, uop)) {
    killed := true.B
  }

  // Branch condition evaluation: signed less-than is derived from the
  // unsigned compare plus the operands' sign bits.
  val rs1 = io.req.bits.rs1_data
  val rs2 = io.req.bits.rs2_data
  val br_eq  = (rs1 === rs2)
  val br_ltu = (rs1.asUInt < rs2.asUInt)
  val br_lt  = (~(rs1(xLen-1) ^ rs2(xLen-1)) & br_ltu |
                rs1(xLen-1) & ~rs2(xLen-1)).asBool

  // Next-PC select: fall-through, branch/jump target, or JALR target.
  val pc_sel = MuxLookup(uop.ctrl.br_type, PC_PLUS4)(
    Seq( BR_N   -> PC_PLUS4,
         BR_NE  -> Mux(!br_eq,  PC_BRJMP, PC_PLUS4),
         BR_EQ  -> Mux( br_eq,  PC_BRJMP, PC_PLUS4),
         BR_GE  -> Mux(!br_lt,  PC_BRJMP, PC_PLUS4),
         BR_GEU -> Mux(!br_ltu, PC_BRJMP, PC_PLUS4),
         BR_LT  -> Mux( br_lt,  PC_BRJMP, PC_PLUS4),
         BR_LTU -> Mux( br_ltu, PC_BRJMP, PC_PLUS4),
         BR_J   -> PC_BRJMP,
         BR_JR  -> PC_JALR
         ))

  val is_taken = io.req.valid &&
                 !killed &&
                 (uop.is_br || uop.is_jalr || uop.is_jal) &&
                 (pc_sel =/= PC_PLUS4)

  // "mispredict" means that a branch has been resolved and it must be killed
  val mispredict = WireInit(false.B)

  // SFB branches are handled as predicated data moves, not control flow.
  val is_br   = io.req.valid && !killed && uop.is_br && !uop.is_sfb
  val is_jal  = io.req.valid && !killed && uop.is_jal
  val is_jalr = io.req.valid && !killed && uop.is_jalr

  when (is_br || is_jalr) {
    if (!isJmpUnit) {
      // Only the jump unit may resolve JALR (it needs FTQ access).
      assert (pc_sel =/= PC_JALR)
    }
    // Mispredict if the predicted direction disagrees with the resolved one.
    when (pc_sel === PC_PLUS4) {
      mispredict := uop.taken
    }
    when (pc_sel === PC_BRJMP) {
      mispredict := !uop.taken
    }
  }

  val brinfo = Wire(new BrResolutionInfo)

  // note: jal doesn't allocate a branch-mask, so don't clear a br-mask bit
  brinfo.valid      := is_br || is_jalr
  brinfo.mispredict := mispredict
  brinfo.uop        := uop
  brinfo.cfi_type   := Mux(is_jalr, CFI_JALR,
                       Mux(is_br , CFI_BR, CFI_X))
  brinfo.taken      := is_taken
  brinfo.pc_sel     := pc_sel
  brinfo.jalr_target := DontCare

  // Branch/Jump Target Calculation
  // For jumps we read the FTQ, and can calculate the target
  // For branches we emit the offset for the core to redirect if necessary

  val target_offset = imm_xprlen(20,0).asSInt
  brinfo.jalr_target := DontCare
  if (isJmpUnit) {
    def encodeVirtualAddress(a0: UInt, ea: UInt) = if (vaddrBitsExtended == vaddrBits) {
      ea
    } else {
      // Efficient means to compress 64-bit VA into vaddrBits+1 bits.
      // (VA is bad if VA(vaddrBits) != VA(vaddrBits-1)).
      val a = a0.asSInt >> vaddrBits
      val msb = Mux(a === 0.S || a === -1.S, ea(vaddrBits), !ea(vaddrBits-1))
      Cat(msb, ea(vaddrBits-1,0))
    }

    // rs1 + offset, with bit 0 cleared per the JALR semantics (& -2).
    val jalr_target_base = io.req.bits.rs1_data.asSInt
    val jalr_target_xlen = Wire(UInt(xLen.W))
    jalr_target_xlen := (jalr_target_base + target_offset).asUInt
    val jalr_target = (encodeVirtualAddress(jalr_target_xlen, jalr_target_xlen).asSInt & -2.S).asUInt

    brinfo.jalr_target := jalr_target
    val cfi_idx = ((uop.pc_lob ^ Mux(io.get_ftq_pc.entry.start_bank === 1.U, 1.U << log2Ceil(bankBytes), 0.U)))(log2Ceil(fetchWidth),1)

    when (pc_sel === PC_JALR) {
      // A JALR mispredicts unless the frontend predicted a next-fetch with
      // exactly this target and marked this instruction as the taken CFI.
      mispredict := !io.get_ftq_pc.next_val ||
                    (io.get_ftq_pc.next_pc =/= jalr_target) ||
                    !io.get_ftq_pc.entry.cfi_idx.valid ||
                    (io.get_ftq_pc.entry.cfi_idx.bits =/= cfi_idx)
    }
  }

  brinfo.target_offset := target_offset

  io.brinfo := brinfo

  // Response
  // TODO add clock gate on resp bits from functional units
  // io.resp.bits.data := RegEnable(alu.io.out, io.req.valid)
  // val reg_data = Reg(outType = Bits(width = xLen))
  // reg_data := alu.io.out
  // io.resp.bits.data := reg_data

  val r_val  = RegInit(VecInit(Seq.fill(numStages) { false.B }))
  val r_data = Reg(Vec(numStages, UInt(xLen.W)))
  val r_pred = Reg(Vec(numStages, Bool()))
  // SFB shadow uops may substitute an operand when predicated off; uopMOV
  // simply passes rs2 through.
  val alu_out = Mux(io.req.bits.uop.is_sfb_shadow && io.req.bits.pred_data,
    Mux(io.req.bits.uop.ldst_is_rs1, io.req.bits.rs1_data, io.req.bits.rs2_data),
    Mux(io.req.bits.uop.uopc === uopMOV, io.req.bits.rs2_data, alu.io.out))
  r_val (0) := io.req.valid
  // An SFB branch's "data" is its resolved direction, consumed as a predicate.
  r_data(0) := Mux(io.req.bits.uop.is_sfb_br, pc_sel === PC_BRJMP, alu_out)
  r_pred(0) := io.req.bits.uop.is_sfb_shadow && io.req.bits.pred_data
  for (i <- 1 until numStages) {
    r_val(i)  := r_val(i-1)
    r_data(i) := r_data(i-1)
    r_pred(i) := r_pred(i-1)
  }
  io.resp.bits.data       := r_data(numStages-1)
  io.resp.bits.predicated := r_pred(numStages-1)

  // Bypass
  // for the ALU, we can bypass same cycle as compute
  require (numStages >= 1)
  require (numBypassStages >= 1)
  io.bypass(0).valid := io.req.valid
  io.bypass(0).bits.data := Mux(io.req.bits.uop.is_sfb_br, pc_sel === PC_BRJMP, alu_out)
  for (i <- 1 until numStages) {
    io.bypass(i).valid := r_val(i-1)
    io.bypass(i).bits.data := r_data(i-1)
  }

  // Exceptions: the integer ALU never raises FP flags.
  io.resp.bits.fflags.valid := false.B
}
/**
* Functional unit that passes in base+imm to calculate addresses, and passes store data
* to the LSU.
* For floating point, 65bit FP store-data needs to be decoded into 64bit FP form
*/
class MemAddrCalcUnit(implicit p: Parameters)
  extends PipelinedFunctionalUnit(
    numStages = 0,
    numBypassStages = 0,
    earliestBypassStage = 0,
    dataWidth = 65, // TODO enable this only if FP is enabled?
    isMemAddrCalcUnit = true)
  with freechips.rocketchip.rocket.constants.MemoryOpConstants
  with freechips.rocketchip.rocket.constants.ScalarOpConstants
{
  // perform address calculation: base (rs1) plus the 12-bit immediate slice
  // of the packed immediate field, both treated as signed.
  val sum = (io.req.bits.rs1_data.asSInt + io.req.bits.uop.imm_packed(19,8).asSInt).asUInt
  // Extra "sign" bit flags addresses whose upper bits are not a proper sign
  // extension of the vaddr (i.e. a bad virtual address).
  val ea_sign = Mux(sum(vaddrBits-1), ~sum(63,vaddrBits) === 0.U,
                                       sum(63,vaddrBits) =/= 0.U)
  val effective_address = Cat(ea_sign, sum(vaddrBits-1,0)).asUInt

  val store_data = io.req.bits.rs2_data

  io.resp.bits.addr := effective_address
  io.resp.bits.data := store_data

  if (dataWidth > 63) {
    assert (!(io.req.valid && io.req.bits.uop.ctrl.is_std &&
      io.resp.bits.data(64).asBool === true.B), "65th bit set in MemAddrCalcUnit.")

    assert (!(io.req.valid && io.req.bits.uop.ctrl.is_std && io.req.bits.uop.fp_val),
      "FP store-data should now be going through a different unit.")
  }

  assert (!(io.req.bits.uop.fp_val && io.req.valid && io.req.bits.uop.uopc =/=
          uopLD && io.req.bits.uop.uopc =/= uopSTA),
          "[maddrcalc] assert we never get store data in here.")

  // Handle misaligned exceptions: check address alignment against the access size.
  val size = io.req.bits.uop.mem_size
  val misaligned =
    (size === 1.U && (effective_address(0) =/= 0.U)) ||
    (size === 2.U && (effective_address(1,0) =/= 0.U)) ||
    (size === 3.U && (effective_address(2,0) =/= 0.U))

  // Hardware breakpoint/trigger checks on the effective address.
  val bkptu = Module(new BreakpointUnit(nBreakpoints))
  bkptu.io.status   := io.status
  bkptu.io.bp       := io.bp
  bkptu.io.pc       := DontCare
  bkptu.io.ea       := effective_address
  bkptu.io.mcontext := io.mcontext
  bkptu.io.scontext := io.scontext

  val ma_ld  = io.req.valid && io.req.bits.uop.uopc === uopLD && misaligned
  val ma_st  = io.req.valid && (io.req.bits.uop.uopc === uopSTA || io.req.bits.uop.uopc === uopAMO_AG) && misaligned
  val dbg_bp = io.req.valid && ((io.req.bits.uop.uopc === uopLD && bkptu.io.debug_ld) ||
                                (io.req.bits.uop.uopc === uopSTA && bkptu.io.debug_st))
  val bp     = io.req.valid && ((io.req.bits.uop.uopc === uopLD && bkptu.io.xcpt_ld) ||
                                (io.req.bits.uop.uopc === uopSTA && bkptu.io.xcpt_st))

  // Returns (any exception fired, highest-priority cause in list order).
  def checkExceptions(x: Seq[(Bool, UInt)]) =
    (x.map(_._1).reduce(_||_), PriorityMux(x))
  val (xcpt_val, xcpt_cause) = checkExceptions(List(
    (ma_ld,  (Causes.misaligned_load).U),
    (ma_st,  (Causes.misaligned_store).U),
    (dbg_bp, (CSR.debugTriggerCause).U),
    (bp,     (Causes.breakpoint).U)))

  io.resp.bits.mxcpt.valid := xcpt_val
  io.resp.bits.mxcpt.bits  := xcpt_cause
  assert (!(ma_ld && ma_st), "Mutually-exclusive exceptions are firing.")

  // SFENCE: rs1/rs2-valid flags ride on the mem_size bits; operands come from
  // the register data.
  io.resp.bits.sfence.valid     := io.req.valid && io.req.bits.uop.mem_cmd === M_SFENCE
  io.resp.bits.sfence.bits.rs1  := io.req.bits.uop.mem_size(0)
  io.resp.bits.sfence.bits.rs2  := io.req.bits.uop.mem_size(1)
  io.resp.bits.sfence.bits.addr := io.req.bits.rs1_data
  io.resp.bits.sfence.bits.asid := io.req.bits.rs2_data
}
/**
* Functional unit to wrap lower level FPU
*
* Currently, bypassing is unsupported!
* All FP instructions are padded out to the max latency unit for easy
* write-port scheduling.
*/
class FPUUnit(implicit p: Parameters)
  extends PipelinedFunctionalUnit(
    numStages = p(tile.TileKey).core.fpu.get.dfmaLatency,
    numBypassStages = 0,
    earliestBypassStage = 0,
    dataWidth = 65,
    needsFcsr = true)
{
  // Thin wrapper: forward the request straight into the shared FPU; the
  // pipeline depth matches the double-precision FMA latency.
  val fpu = Module(new FPU())
  fpu.io.req.valid         := io.req.valid
  fpu.io.req.bits.uop      := io.req.bits.uop
  fpu.io.req.bits.rs1_data := io.req.bits.rs1_data
  fpu.io.req.bits.rs2_data := io.req.bits.rs2_data
  fpu.io.req.bits.rs3_data := io.req.bits.rs3_data
  fpu.io.req.bits.fcsr_rm  := io.fcsr_rm

  io.resp.bits.data := fpu.io.resp.bits.data
  io.resp.bits.fflags.valid := fpu.io.resp.bits.fflags.valid
  // The fflags uop comes from this unit's own response pipeline; only the
  // flag bits come from the FPU.
  io.resp.bits.fflags.bits.uop := io.resp.bits.uop
  io.resp.bits.fflags.bits.flags := fpu.io.resp.bits.fflags.bits.flags // kill me now
}
/**
* Int to FP conversion functional unit
*
* @param latency the amount of stages to delay by
*/
class IntToFPUnit(latency: Int)(implicit p: Parameters)
  extends PipelinedFunctionalUnit(
    numStages = latency,
    numBypassStages = 0,
    earliestBypassStage = 0,
    dataWidth = 65,
    needsFcsr = true)
  with tile.HasFPUParameters
{
  val fp_decoder = Module(new UOPCodeFPUDecoder) // TODO use a simpler decoder
  val io_req = io.req.bits
  fp_decoder.io.uopc := io_req.uop.uopc
  val fp_ctrl = fp_decoder.io.sigs
  // An encoded rounding mode of 7 means "dynamic": take it from the FCSR.
  val fp_rm = Mux(ImmGenRm(io_req.uop.imm_packed) === 7.U, io.fcsr_rm, ImmGenRm(io_req.uop.imm_packed))
  val req = Wire(new tile.FPInput)
  val tag = fp_ctrl.typeTagIn

  // Copy the decoded control signals into the FPInput's ctrl-sig view.
  req.viewAsSupertype(new tile.FPUCtrlSigs) := fp_ctrl

  req.rm  := fp_rm
  req.in1 := unbox(io_req.rs1_data, tag, None)
  req.in2 := unbox(io_req.rs2_data, tag, None)
  req.in3 := DontCare
  req.typ := ImmGenTyp(io_req.uop.imm_packed)
  req.fmt := DontCare // FIXME: this may not be the right thing to do here
  req.fmaCmd := DontCare

  assert (!(io.req.valid && fp_ctrl.fromint && req.in1(xLen).asBool),
    "[func] IntToFP integer input has 65th high-order bit set!")

  assert (!(io.req.valid && !fp_ctrl.fromint),
    "[func] Only support fromInt micro-ops.")

  val ifpu = Module(new tile.IntToFP(intToFpLatency))
  ifpu.io.in.valid := io.req.valid
  ifpu.io.in.bits := req
  // in1 takes the raw (un-unboxed) integer data; overrides the assignment above.
  ifpu.io.in.bits.in1 := io_req.rs1_data
  // Delay the output type tag to line up with the converter's latency, so the
  // result is boxed with the correct precision.
  val out_double = Pipe(io.req.valid, fp_ctrl.typeTagOut === D, intToFpLatency).bits

  //io.resp.bits.data := box(ifpu.io.out.bits.data, !io.resp.bits.uop.fp_single)
  io.resp.bits.data              := box(ifpu.io.out.bits.data, out_double)
  io.resp.bits.fflags.valid      := ifpu.io.out.valid
  io.resp.bits.fflags.bits.uop   := io.resp.bits.uop
  io.resp.bits.fflags.bits.flags := ifpu.io.out.bits.exc
}
/**
* Iterative/unpipelined functional unit, can only hold a single MicroOp at a time
* assumes at least one register between request and response
*
* TODO allow up to N micro-ops simultaneously.
*
* @param dataWidth width of the data to be passed into the functional unit
*/
abstract class IterativeFunctionalUnit(dataWidth: Int)(implicit p: Parameters)
  extends FunctionalUnit(
    isPipelined = false,
    numStages = 1,
    numBypassStages = 0,
    dataWidth = dataWidth)
{
  // Metadata of the single in-flight micro-op.
  val r_uop = Reg(new MicroOp())

  val do_kill = Wire(Bool())
  do_kill := io.req.bits.kill // irrelevant default

  when (io.req.fire) {
    // update incoming uop: kill it immediately if a branch resolution or
    // pipeline kill hits it on the cycle it arrives.
    do_kill := IsKilledByBranch(io.brupdate, io.req.bits.uop) || io.req.bits.kill
    r_uop := io.req.bits.uop
    r_uop.br_mask := GetNewBrMask(io.brupdate, io.req.bits.uop)
  } .otherwise {
    // While iterating, keep monitoring the held uop for branch kills and keep
    // pruning its branch mask.
    do_kill := IsKilledByBranch(io.brupdate, r_uop) || io.req.bits.kill
    r_uop.br_mask := GetNewBrMask(io.brupdate, r_uop)
  }

  // assumes at least one pipeline register between request and response
  io.resp.bits.uop := r_uop
}
/**
* Divide functional unit.
*
* @param dataWidth data to be passed into the functional unit
*/
class DivUnit(dataWidth: Int)(implicit p: Parameters)
  extends IterativeFunctionalUnit(dataWidth)
{
  // We don't use the iterative multiply functionality here.
  // Instead we use the PipelinedMultiplier
  val div = Module(new freechips.rocketchip.rocket.MulDiv(mulDivParams, width = dataWidth))

  // request: suppress issue if the uop was already killed this cycle
  div.io.req.valid    := io.req.valid && !this.do_kill
  div.io.req.bits.dw  := io.req.bits.uop.ctrl.fcn_dw
  div.io.req.bits.fn  := io.req.bits.uop.ctrl.op_fcn
  div.io.req.bits.in1 := io.req.bits.rs1_data
  div.io.req.bits.in2 := io.req.bits.rs2_data
  div.io.req.bits.tag := DontCare
  // Back-pressure the issue logic while a divide is in flight.
  io.req.ready := div.io.req.ready

  // handle pipeline kills and branch misspeculations
  div.io.kill := this.do_kill

  // response: drop the result if the uop died while the divide was iterating
  io.resp.valid := div.io.resp.valid && !this.do_kill
  div.io.resp.ready := io.resp.ready
  io.resp.bits.data := div.io.resp.bits.data
}
/**
* Pipelined multiplier functional unit that wraps around the RocketChip pipelined multiplier
*
* @param numStages number of pipeline stages
* @param dataWidth size of the data being passed into the functional unit
*/
class PipelinedMulUnit(numStages: Int, dataWidth: Int)(implicit p: Parameters)
  extends PipelinedFunctionalUnit(
    numStages = numStages,
    numBypassStages = 0,
    earliestBypassStage = 0,
    dataWidth = dataWidth)
{
  // Thin wrapper around rocket-chip's fixed-latency pipelined multiplier;
  // uop tracking/kill handling is inherited from PipelinedFunctionalUnit.
  val imul = Module(new PipelinedMultiplier(xLen, numStages))
  // request
  imul.io.req.valid    := io.req.valid
  imul.io.req.bits.fn  := io.req.bits.uop.ctrl.op_fcn
  imul.io.req.bits.dw  := io.req.bits.uop.ctrl.fcn_dw
  imul.io.req.bits.in1 := io.req.bits.rs1_data
  imul.io.req.bits.in2 := io.req.bits.rs2_data
  imul.io.req.bits.tag := DontCare
  // response
  io.resp.bits.data := imul.io.resp.bits.data
} | module PipelinedMulUnit(
  input         clock,
  input         reset,
                // incoming multiply request: uop control + metadata + operands
  input         io_req_valid,
  input  [4:0]  io_req_bits_uop_ctrl_op_fcn,
  input         io_req_bits_uop_ctrl_fcn_dw,
  input  [7:0]  io_req_bits_uop_br_mask,
  input  [4:0]  io_req_bits_uop_rob_idx,
  input  [5:0]  io_req_bits_uop_pdst,
  input         io_req_bits_uop_bypassable,
  input         io_req_bits_uop_is_amo,
  input         io_req_bits_uop_uses_stq,
  input  [1:0]  io_req_bits_uop_dst_rtype,
  input  [63:0] io_req_bits_rs1_data,
  input  [63:0] io_req_bits_rs2_data,
  input         io_req_bits_kill,
                // response, produced after the three register stages below
  output        io_resp_valid,
  output [4:0]  io_resp_bits_uop_rob_idx,
  output [5:0]  io_resp_bits_uop_pdst,
  output        io_resp_bits_uop_bypassable,
  output        io_resp_bits_uop_is_amo,
  output        io_resp_bits_uop_uses_stq,
  output [1:0]  io_resp_bits_uop_dst_rtype,
  output [63:0] io_resp_bits_data,
                // branch-update masks used to prune/kill in-flight uops
  input  [7:0]  io_brupdate_b1_resolve_mask,
  input  [7:0]  io_brupdate_b1_mispredict_mask
);

  // Three-deep valid/uop shift register tracking the multiplier's latency.
  reg       r_valids_0;
  reg       r_valids_1;
  reg       r_valids_2;
  reg [7:0] r_uops_0_br_mask;
  reg [4:0] r_uops_0_rob_idx;
  reg [5:0] r_uops_0_pdst;
  reg       r_uops_0_bypassable;
  reg       r_uops_0_is_amo;
  reg       r_uops_0_uses_stq;
  reg [1:0] r_uops_0_dst_rtype;
  reg [7:0] r_uops_1_br_mask;
  reg [4:0] r_uops_1_rob_idx;
  reg [5:0] r_uops_1_pdst;
  reg       r_uops_1_bypassable;
  reg       r_uops_1_is_amo;
  reg       r_uops_1_uses_stq;
  reg [1:0] r_uops_1_dst_rtype;
  reg [7:0] r_uops_2_br_mask;
  reg [4:0] r_uops_2_rob_idx;
  reg [5:0] r_uops_2_pdst;
  reg       r_uops_2_bypassable;
  reg       r_uops_2_is_amo;
  reg       r_uops_2_uses_stq;
  reg [1:0] r_uops_2_dst_rtype;
  always @(posedge clock) begin
    if (reset) begin
      r_valids_0 <= 1'h0;
      r_valids_1 <= 1'h0;
      r_valids_2 <= 1'h0;
    end
    else begin
      // A stage stays valid only if no resolved-mispredict bit overlaps its
      // branch mask and no pipeline kill is requested this cycle.
      r_valids_0 <= io_req_valid & (io_brupdate_b1_mispredict_mask & io_req_bits_uop_br_mask) == 8'h0 & ~io_req_bits_kill;
      r_valids_1 <= r_valids_0 & (io_brupdate_b1_mispredict_mask & r_uops_0_br_mask) == 8'h0 & ~io_req_bits_kill;
      r_valids_2 <= r_valids_1 & (io_brupdate_b1_mispredict_mask & r_uops_1_br_mask) == 8'h0 & ~io_req_bits_kill;
    end
    // Branch masks are pruned by the resolve mask as uop metadata advances.
    r_uops_0_br_mask    <= io_req_bits_uop_br_mask & ~io_brupdate_b1_resolve_mask;
    r_uops_0_rob_idx    <= io_req_bits_uop_rob_idx;
    r_uops_0_pdst       <= io_req_bits_uop_pdst;
    r_uops_0_bypassable <= io_req_bits_uop_bypassable;
    r_uops_0_is_amo     <= io_req_bits_uop_is_amo;
    r_uops_0_uses_stq   <= io_req_bits_uop_uses_stq;
    r_uops_0_dst_rtype  <= io_req_bits_uop_dst_rtype;
    r_uops_1_br_mask    <= r_uops_0_br_mask & ~io_brupdate_b1_resolve_mask;
    r_uops_1_rob_idx    <= r_uops_0_rob_idx;
    r_uops_1_pdst       <= r_uops_0_pdst;
    r_uops_1_bypassable <= r_uops_0_bypassable;
    r_uops_1_is_amo     <= r_uops_0_is_amo;
    r_uops_1_uses_stq   <= r_uops_0_uses_stq;
    r_uops_1_dst_rtype  <= r_uops_0_dst_rtype;
    r_uops_2_br_mask    <= r_uops_1_br_mask & ~io_brupdate_b1_resolve_mask;
    r_uops_2_rob_idx    <= r_uops_1_rob_idx;
    r_uops_2_pdst       <= r_uops_1_pdst;
    r_uops_2_bypassable <= r_uops_1_bypassable;
    r_uops_2_is_amo     <= r_uops_1_is_amo;
    r_uops_2_uses_stq   <= r_uops_1_uses_stq;
    r_uops_2_dst_rtype  <= r_uops_1_dst_rtype;
  end
  // The 64-bit datapath: rocket-chip's pipelined multiplier computes the
  // product while the shift registers above carry the uop metadata.
  PipelinedMultiplier imul (
    .clock             (clock),
    .reset             (reset),
    .io_req_valid      (io_req_valid),
    .io_req_bits_fn    (io_req_bits_uop_ctrl_op_fcn),
    .io_req_bits_dw    (io_req_bits_uop_ctrl_fcn_dw),
    .io_req_bits_in1   (io_req_bits_rs1_data),
    .io_req_bits_in2   (io_req_bits_rs2_data),
    .io_resp_bits_data (io_resp_bits_data)
  );
  // Response is the last stage's uop, still subject to a same-cycle branch kill.
  assign io_resp_valid = r_valids_2 & (io_brupdate_b1_mispredict_mask & r_uops_2_br_mask) == 8'h0;
  assign io_resp_bits_uop_rob_idx = r_uops_2_rob_idx;
  assign io_resp_bits_uop_pdst = r_uops_2_pdst;
  assign io_resp_bits_uop_bypassable = r_uops_2_bypassable;
  assign io_resp_bits_uop_is_amo = r_uops_2_is_amo;
  assign io_resp_bits_uop_uses_stq = r_uops_2_uses_stq;
  assign io_resp_bits_uop_dst_rtype = r_uops_2_dst_rtype;
endmodule |
Generate the Verilog code corresponding to this Chisel code package boom.v3.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import boom.v3.common._
import boom.v3.util.{BoomCoreStringPrefix, MaskLower, WrapInc}
import scala.math.min
// Per-slot response from a TAGE table: a 3-bit saturating direction counter
// and a 2-bit usefulness counter.
class TageResp extends Bundle {
  val ctr = UInt(3.W)
  val u   = UInt(2.W)
}
class TageTable(val nRows: Int, val tagSz: Int, val histLength: Int, val uBitPeriod: Int)
  (implicit p: Parameters) extends BoomModule()(p)
  with HasBoomFrontendParameters
{
  require(histLength <= globalHistoryLength)

  val nWrBypassEntries = 2
  val io = IO( new Bundle {
    // F1 read request: PC and global history to hash into an index/tag.
    val f1_req_valid = Input(Bool())
    val f1_req_pc    = Input(UInt(vaddrBitsExtended.W))
    val f1_req_ghist = Input(UInt(globalHistoryLength.W))

    // F3 response, one Valid(TageResp) per slot in the fetch bank.
    val f3_resp = Output(Vec(bankWidth, Valid(new TageResp)))

    // Update interface (registered externally one cycle before arrival).
    val update_mask    = Input(Vec(bankWidth, Bool()))
    val update_taken   = Input(Vec(bankWidth, Bool()))
    val update_alloc   = Input(Vec(bankWidth, Bool()))
    val update_old_ctr = Input(Vec(bankWidth, UInt(3.W)))

    val update_pc   = Input(UInt())
    val update_hist = Input(UInt())

    val update_u_mask = Input(Vec(bankWidth, Bool()))
    val update_u      = Input(Vec(bankWidth, UInt(2.W)))
  })

  // XOR-fold the (histLength-bit) history down to l bits.
  def compute_folded_hist(hist: UInt, l: Int) = {
    val nChunks = (histLength + l - 1) / l
    val hist_chunks = (0 until nChunks) map {i =>
      hist(min((i+1)*l, histLength)-1, i*l)
    }
    hist_chunks.reduce(_^_)
  }

  // Hash the fetch index with folded history to get the row index and tag.
  def compute_tag_and_hash(unhashed_idx: UInt, hist: UInt) = {
    val idx_history = compute_folded_hist(hist, log2Ceil(nRows))
    val idx = (unhashed_idx ^ idx_history)(log2Ceil(nRows)-1,0)
    val tag_history = compute_folded_hist(hist, tagSz)
    val tag = ((unhashed_idx >> log2Ceil(nRows)) ^ tag_history)(tagSz-1,0)
    (idx, tag)
  }

  // 3-bit saturating counter increment (taken) / decrement (not taken).
  def inc_ctr(ctr: UInt, taken: Bool): UInt = {
    Mux(!taken, Mux(ctr === 0.U, 0.U, ctr - 1.U),
                Mux(ctr === 7.U, 7.U, ctr + 1.U))
  }

  // Walk every row once out of reset to zero-initialize the memories.
  val doing_reset = RegInit(true.B)
  val reset_idx = RegInit(0.U(log2Ceil(nRows).W))
  reset_idx := reset_idx + doing_reset
  when (reset_idx === (nRows-1).U) { doing_reset := false.B }

  class TageEntry extends Bundle {
    val valid = Bool() // TODO: Remove this valid bit
    val tag   = UInt(tagSz.W)
    val ctr   = UInt(3.W)
  }

  val tageEntrySz = 1 + tagSz + 3

  val (s1_hashed_idx, s1_tag) = compute_tag_and_hash(fetchIdx(io.f1_req_pc), io.f1_req_ghist)

  // u bits are split into separate hi/lo single-bit arrays so each half can
  // be cleared independently.
  val hi_us = SyncReadMem(nRows, Vec(bankWidth, Bool()))
  val lo_us = SyncReadMem(nRows, Vec(bankWidth, Bool()))
  val table = SyncReadMem(nRows, Vec(bankWidth, UInt(tageEntrySz.W)))

  val mems = Seq((f"tage_l$histLength", nRows, bankWidth * tageEntrySz))

  val s2_tag = RegNext(s1_tag)

  // S2: synchronous read data; hit requires valid entry + tag match.
  val s2_req_rtage = VecInit(table.read(s1_hashed_idx, io.f1_req_valid).map(_.asTypeOf(new TageEntry)))
  val s2_req_rhius = hi_us.read(s1_hashed_idx, io.f1_req_valid)
  val s2_req_rlous = lo_us.read(s1_hashed_idx, io.f1_req_valid)
  val s2_req_rhits = VecInit(s2_req_rtage.map(e => e.valid && e.tag === s2_tag && !doing_reset))

  for (w <- 0 until bankWidth) {
    // This bit indicates the TAGE table matched here
    io.f3_resp(w).valid    := RegNext(s2_req_rhits(w))
    io.f3_resp(w).bits.u   := RegNext(Cat(s2_req_rhius(w), s2_req_rlous(w)))
    io.f3_resp(w).bits.ctr := RegNext(s2_req_rtage(w).ctr)
  }

  // Periodic usefulness decay: every uBitPeriod cycles clear one row's u bits,
  // alternating between the hi and lo halves (selected by the counter MSB).
  val clear_u_ctr = RegInit(0.U((log2Ceil(uBitPeriod) + log2Ceil(nRows) + 1).W))
  when (doing_reset) { clear_u_ctr := 1.U } .otherwise { clear_u_ctr := clear_u_ctr + 1.U }
  val doing_clear_u = clear_u_ctr(log2Ceil(uBitPeriod)-1,0) === 0.U
  val doing_clear_u_hi = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 1.U
  val doing_clear_u_lo = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 0.U
  val clear_u_idx = clear_u_ctr >> log2Ceil(uBitPeriod)

  val (update_idx, update_tag) = compute_tag_and_hash(fetchIdx(io.update_pc), io.update_hist)

  val update_wdata = Wire(Vec(bankWidth, new TageEntry))

  // Reset writes (zeroing, full mask) take priority over normal updates.
  table.write(
    Mux(doing_reset, reset_idx                                          , update_idx),
    Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 0.U(tageEntrySz.W) }), VecInit(update_wdata.map(_.asUInt))),
    Mux(doing_reset, ~(0.U(bankWidth.W))                                , io.update_mask.asUInt).asBools
  )

  val update_hi_wdata = Wire(Vec(bankWidth, Bool()))
  hi_us.write(
    Mux(doing_reset, reset_idx, Mux(doing_clear_u_hi, clear_u_idx, update_idx)),
    Mux(doing_reset || doing_clear_u_hi, VecInit((0.U(bankWidth.W)).asBools), update_hi_wdata),
    Mux(doing_reset || doing_clear_u_hi, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools
  )

  val update_lo_wdata = Wire(Vec(bankWidth, Bool()))
  lo_us.write(
    Mux(doing_reset, reset_idx, Mux(doing_clear_u_lo, clear_u_idx, update_idx)),
    Mux(doing_reset || doing_clear_u_lo, VecInit((0.U(bankWidth.W)).asBools), update_lo_wdata),
    Mux(doing_reset || doing_clear_u_lo, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools
  )

  // Small write-bypass CAM so back-to-back updates to the same (idx, tag) row
  // increment the freshest counter value rather than a stale read.
  val wrbypass_tags    = Reg(Vec(nWrBypassEntries, UInt(tagSz.W)))
  val wrbypass_idxs    = Reg(Vec(nWrBypassEntries, UInt(log2Ceil(nRows).W)))
  val wrbypass         = Reg(Vec(nWrBypassEntries, Vec(bankWidth, UInt(3.W))))
  val wrbypass_enq_idx = RegInit(0.U(log2Ceil(nWrBypassEntries).W))

  val wrbypass_hits = VecInit((0 until nWrBypassEntries) map { i =>
    !doing_reset &&
    wrbypass_tags(i) === update_tag &&
    wrbypass_idxs(i) === update_idx
  })
  val wrbypass_hit     = wrbypass_hits.reduce(_||_)
  val wrbypass_hit_idx = PriorityEncoder(wrbypass_hits)

  for (w <- 0 until bankWidth) {
    // Fresh allocations start weakly taken (4) or weakly not-taken (3);
    // otherwise bump the bypassed or supplied old counter.
    update_wdata(w).ctr   := Mux(io.update_alloc(w),
      Mux(io.update_taken(w), 4.U,
                              3.U
      ),
      Mux(wrbypass_hit,       inc_ctr(wrbypass(wrbypass_hit_idx)(w), io.update_taken(w)),
                              inc_ctr(io.update_old_ctr(w), io.update_taken(w))
      )
    )
    update_wdata(w).valid := true.B
    update_wdata(w).tag   := update_tag

    update_hi_wdata(w) := io.update_u(w)(1)
    update_lo_wdata(w) := io.update_u(w)(0)
  }

  when (io.update_mask.reduce(_||_)) {
    when (wrbypass_hits.reduce(_||_)) {
      // Refresh the matching bypass entry with the counters just written.
      wrbypass(wrbypass_hit_idx) := VecInit(update_wdata.map(_.ctr))
    } .otherwise {
      // Allocate a new bypass entry round-robin.
      wrbypass     (wrbypass_enq_idx) := VecInit(update_wdata.map(_.ctr))
      wrbypass_tags(wrbypass_enq_idx) := update_tag
      wrbypass_idxs(wrbypass_enq_idx) := update_idx
      wrbypass_enq_idx := WrapInc(wrbypass_enq_idx, nWrBypassEntries)
    }
  }
}
// TAGE configuration: one (nSets, histLen, tagSz) tuple per table, ordered by
// increasing history length, plus the u-bit decay period in cycles.
case class BoomTageParams(
  //                                          nSets, histLen, tagSz
  tableInfo: Seq[Tuple3[Int, Int, Int]] = Seq(( 128,       2,     7),
                                              ( 128,       4,     7),
                                              ( 256,       8,     8),
                                              ( 256,      16,     8),
                                              ( 128,      32,     9),
                                              ( 128,      64,     9)),
  uBitPeriod: Int = 2048
)
class TageBranchPredictorBank(params: BoomTageParams = BoomTageParams())(implicit p: Parameters) extends BranchPredictorBank()(p)
{
  val tageUBitPeriod = params.uBitPeriod
  val tageNTables    = params.tableInfo.size

  // Per-prediction metadata carried from F3 to the update pipeline so updates
  // can find the provider table and allocation candidates again.
  class TageMeta extends Bundle
  {
    val provider     = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
    val alt_differs  = Vec(bankWidth, Output(Bool()))
    val provider_u   = Vec(bankWidth, Output(UInt(2.W)))
    val provider_ctr = Vec(bankWidth, Output(UInt(3.W)))
    val allocate     = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
  }

  val f3_meta = Wire(new TageMeta)
  override val metaSz = f3_meta.asUInt.getWidth
  require(metaSz <= bpdMaxMetaLength)

  // Saturating 2-bit usefulness update: only moves when the alternate
  // prediction disagreed with the provider; correct => +1, mispredict => -1.
  def inc_u(u: UInt, alt_differs: Bool, mispredict: Bool): UInt = {
    Mux(!alt_differs, u,
    Mux(mispredict, Mux(u === 0.U, 0.U, u - 1.U),
                    Mux(u === 3.U, 3.U, u + 1.U)))
  }

  // Instantiate one TageTable per configured (nSets, histLen, tagSz) tuple.
  // Note the constructor order: TageTable(nRows, tagSz, histLength, uBitPeriod).
  val tt = params.tableInfo map {
    case (n, l, s) => {
      val t = Module(new TageTable(n, s, l, params.uBitPeriod))
      t.io.f1_req_valid := RegNext(io.f0_valid)
      t.io.f1_req_pc    := RegNext(io.f0_pc)
      t.io.f1_req_ghist := io.f1_ghist
      (t, t.mems)
    }
  }
  val tables = tt.map(_._1)
  val mems = tt.map(_._2).flatten

  val f3_resps = VecInit(tables.map(_.io.f3_resp))

  val s1_update_meta = s1_update.bits.meta.asTypeOf(new TageMeta)
  val s1_update_mispredict_mask = UIntToOH(s1_update.bits.cfi_idx.bits) &
    Fill(bankWidth, s1_update.bits.cfi_mispredicted)

  // Per-table, per-slot update control; defaults below, overridden in the
  // when-blocks that follow (Chisel last-connect semantics).
  val s1_update_mask   = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, Bool()))))
  val s1_update_u_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, UInt(1.W)))))

  val s1_update_taken   = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
  val s1_update_old_ctr = Wire(Vec(tageNTables, Vec(bankWidth, UInt(3.W))))
  val s1_update_alloc   = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
  val s1_update_u       = Wire(Vec(tageNTables, Vec(bankWidth, UInt(2.W))))

  s1_update_taken   := DontCare
  s1_update_old_ctr := DontCare
  s1_update_alloc   := DontCare
  s1_update_u       := DontCare

  for (w <- 0 until bankWidth) {
    // Scan tables from index 0 upward; the last hitting table (longest
    // history, per the tableInfo ordering) becomes the provider.
    var altpred = io.resp_in(0).f3(w).taken
    val final_altpred = WireInit(io.resp_in(0).f3(w).taken)
    var provided = false.B
    var provider = 0.U
    io.resp.f3(w).taken := io.resp_in(0).f3(w).taken

    for (i <- 0 until tageNTables) {
      val hit = f3_resps(i)(w).valid
      val ctr = f3_resps(i)(w).bits.ctr
      when (hit) {
        // Weak counters (3/4) defer to the alternate prediction.
        io.resp.f3(w).taken := Mux(ctr === 3.U || ctr === 4.U, altpred, ctr(2))
        final_altpred       := altpred
      }

      provided = provided || hit
      provider = Mux(hit, i.U, provider)
      altpred  = Mux(hit, f3_resps(i)(w).bits.ctr(2), altpred)
    }
    f3_meta.provider(w).valid := provided
    f3_meta.provider(w).bits  := provider
    f3_meta.alt_differs(w)    := final_altpred =/= io.resp.f3(w).taken
    f3_meta.provider_u(w)     := f3_resps(provider)(w).bits.u
    f3_meta.provider_ctr(w)   := f3_resps(provider)(w).bits.ctr

    // Create a mask of tables which did not hit our query, and also contain useless entries
    // and also uses a longer history than the provider
    val allocatable_slots = (
      VecInit(f3_resps.map(r => !r(w).valid && r(w).bits.u === 0.U)).asUInt &
      ~(MaskLower(UIntToOH(provider)) & Fill(tageNTables, provided))
    )
    // Randomize the allocation choice so allocation pressure spreads across tables.
    val alloc_lfsr = random.LFSR(tageNTables max 2)

    val first_entry  = PriorityEncoder(allocatable_slots)
    val masked_entry = PriorityEncoder(allocatable_slots & alloc_lfsr)
    val alloc_entry  = Mux(allocatable_slots(masked_entry),
      masked_entry,
      first_entry)

    f3_meta.allocate(w).valid := allocatable_slots =/= 0.U
    f3_meta.allocate(w).bits  := alloc_entry

    val update_was_taken = (s1_update.bits.cfi_idx.valid &&
                            (s1_update.bits.cfi_idx.bits === w.U) &&
                            s1_update.bits.cfi_taken)
    // On commit, train the provider table's counter and usefulness bits.
    when (s1_update.bits.br_mask(w) && s1_update.valid && s1_update.bits.is_commit_update) {
      when (s1_update_meta.provider(w).valid) {
        val provider = s1_update_meta.provider(w).bits

        s1_update_mask(provider)(w)   := true.B
        s1_update_u_mask(provider)(w) := true.B

        val new_u = inc_u(s1_update_meta.provider_u(w),
                          s1_update_meta.alt_differs(w),
                          s1_update_mispredict_mask(w))
        s1_update_u      (provider)(w) := new_u
        s1_update_taken  (provider)(w) := update_was_taken
        s1_update_old_ctr(provider)(w) := s1_update_meta.provider_ctr(w)
        s1_update_alloc  (provider)(w) := false.B
      }
    }
  }

  // On a committed mispredict, try to allocate in a longer-history table; if
  // nothing is allocatable, decay the u bits of all longer-history tables.
  when (s1_update.valid && s1_update.bits.is_commit_update && s1_update.bits.cfi_mispredicted && s1_update.bits.cfi_idx.valid) {
    val idx = s1_update.bits.cfi_idx.bits
    val allocate = s1_update_meta.allocate(idx)
    when (allocate.valid) {
      s1_update_mask (allocate.bits)(idx) := true.B
      s1_update_taken(allocate.bits)(idx) := s1_update.bits.cfi_taken
      s1_update_alloc(allocate.bits)(idx) := true.B

      s1_update_u_mask(allocate.bits)(idx) := true.B
      s1_update_u     (allocate.bits)(idx) := 0.U
    } .otherwise {
      val provider = s1_update_meta.provider(idx)
      val decr_mask = Mux(provider.valid, ~MaskLower(UIntToOH(provider.bits)), 0.U)

      for (i <- 0 until tageNTables) {
        when (decr_mask(i)) {
          s1_update_u_mask(i)(idx) := true.B
          s1_update_u     (i)(idx) := 0.U
        }
      }
    }
  }

  // Register the update signals once more before they reach the tables.
  for (i <- 0 until tageNTables) {
    for (w <- 0 until bankWidth) {
      tables(i).io.update_mask(w)    := RegNext(s1_update_mask(i)(w))
      tables(i).io.update_taken(w)   := RegNext(s1_update_taken(i)(w))
      tables(i).io.update_alloc(w)   := RegNext(s1_update_alloc(i)(w))
      tables(i).io.update_old_ctr(w) := RegNext(s1_update_old_ctr(i)(w))

      tables(i).io.update_u_mask(w) := RegNext(s1_update_u_mask(i)(w))
      tables(i).io.update_u(w)      := RegNext(s1_update_u(i)(w))
    }
    tables(i).io.update_pc   := RegNext(s1_update.bits.pc)
    tables(i).io.update_hist := RegNext(s1_update.bits.ghist)
  }

  //io.f3_meta := Cat(f3_meta.asUInt, micro.io.f3_meta(micro.metaSz-1,0), base.io.f3_meta(base.metaSz-1, 0))
  io.f3_meta := f3_meta.asUInt
} | module lo_us_1(
  // One synchronous read port, one masked write port (256 rows x 4 bits).
  input  [7:0] R0_addr,
  input        R0_en,
  input        R0_clk,
  output [3:0] R0_data,
  input  [7:0] W0_addr,
  input        W0_clk,
  input  [3:0] W0_data,
  input  [3:0] W0_mask
);
  // NOTE(review): this lo_us wrapper instantiates the hi_us_0_ext macro —
  // presumably the generator deduplicates identically-shaped memories; confirm
  // against the memory configuration. Write enable is tied high; per-bit
  // writes are governed solely by W0_mask.
  hi_us_0_ext hi_us_0_ext (
    .R0_addr (R0_addr),
    .R0_en   (R0_en),
    .R0_clk  (R0_clk),
    .R0_data (R0_data),
    .W0_addr (W0_addr),
    .W0_en   (1'h1),
    .W0_clk  (W0_clk),
    .W0_data (W0_data),
    .W0_mask (W0_mask)
  );
endmodule |
Generate the Verilog code corresponding to this Chisel code /*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.util._
// Geometry of a ListBuffer: `queues` linked lists sharing a pool of
// `entries` data slots; `bypass` enables same-cycle push-to-pop forwarding.
case class ListBufferParameters[T <: Data](gen: T, queues: Int, entries: Int, bypass: Boolean)
{
  // Bit widths needed to address a queue and an entry, respectively.
  val queueBits = log2Up(queues)
  val entryBits = log2Up(entries)
}
// Push request: enqueue `data` onto the queue selected by `index`.
class ListBufferPush[T <: Data](params: ListBufferParameters[T]) extends Bundle
{
  val index = UInt(params.queueBits.W)
  val data  = Output(params.gen)
}
class ListBuffer[T <: Data](params: ListBufferParameters[T]) extends Module
{
override def desiredName = s"ListBuffer_${params.gen.typeName}_q${params.queues}_e${params.entries}"
val io = IO(new Bundle {
// push is visible on the same cycle; flow queues
val push = Flipped(Decoupled(new ListBufferPush(params)))
val valid = UInt(params.queues.W)
val pop = Flipped(Valid(UInt(params.queueBits.W)))
val data = Output(params.gen)
})
val valid = RegInit(0.U(params.queues.W))
val head = Mem(params.queues, UInt(params.entryBits.W))
val tail = Mem(params.queues, UInt(params.entryBits.W))
val used = RegInit(0.U(params.entries.W))
val next = Mem(params.entries, UInt(params.entryBits.W))
val data = Mem(params.entries, params.gen)
val freeOH = ~(leftOR(~used) << 1) & ~used
val freeIdx = OHToUInt(freeOH)
val valid_set = WireDefault(0.U(params.queues.W))
val valid_clr = WireDefault(0.U(params.queues.W))
val used_set = WireDefault(0.U(params.entries.W))
val used_clr = WireDefault(0.U(params.entries.W))
val push_tail = tail.read(io.push.bits.index)
val push_valid = valid(io.push.bits.index)
io.push.ready := !used.andR
when (io.push.fire) {
valid_set := UIntToOH(io.push.bits.index, params.queues)
used_set := freeOH
data.write(freeIdx, io.push.bits.data)
when (push_valid) {
next.write(push_tail, freeIdx)
} .otherwise {
head.write(io.push.bits.index, freeIdx)
}
tail.write(io.push.bits.index, freeIdx)
}
val pop_head = head.read(io.pop.bits)
val pop_valid = valid(io.pop.bits)
// Bypass push data to the peek port
io.data := (if (!params.bypass) data.read(pop_head) else Mux(!pop_valid, io.push.bits.data, data.read(pop_head)))
io.valid := (if (!params.bypass) valid else (valid | valid_set))
// It is an error to pop something that is not valid
assert (!io.pop.fire || (io.valid)(io.pop.bits))
when (io.pop.fire) {
used_clr := UIntToOH(pop_head, params.entries)
when (pop_head === tail.read(io.pop.bits)) {
valid_clr := UIntToOH(io.pop.bits, params.queues)
}
head.write(io.pop.bits, Mux(io.push.fire && push_valid && push_tail === pop_head, freeIdx, next.read(pop_head)))
}
// Empty bypass changes no state
when ((!params.bypass).B || !io.pop.valid || pop_valid) {
used := (used & ~used_clr) | used_set
valid := (valid & ~valid_clr) | valid_set
}
} | module data_40x73(
input [5:0] R0_addr,
input R0_en,
input R0_clk,
output [72:0] R0_data,
input [5:0] W0_addr,
input W0_en,
input W0_clk,
input [72:0] W0_data
);
reg [72:0] Memory[0:39];
always @(posedge W0_clk) begin
if (W0_en & 1'h1)
Memory[W0_addr] <= W0_data;
end
assign R0_data = R0_en ? Memory[R0_addr] : 73'bx;
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
object DescribedSRAM {
def apply[T <: Data](
name: String,
desc: String,
size: BigInt, // depth
data: T
): SyncReadMem[T] = {
val mem = SyncReadMem(size, data)
mem.suggestName(name)
val granWidth = data match {
case v: Vec[_] => v.head.getWidth
case d => d.getWidth
}
val uid = 0
Annotated.srams(
component = mem,
name = name,
address_width = log2Ceil(size),
data_width = data.getWidth,
depth = size,
description = desc,
write_mask_granularity = granWidth
)
mem
}
} | module rockettile_icache_data_arrays_0(
input [8:0] RW0_addr,
input RW0_en,
input RW0_clk,
input RW0_wmode,
input [255:0] RW0_wdata,
output [255:0] RW0_rdata,
input [7:0] RW0_wmask
);
rockettile_icache_data_arrays_0_ext rockettile_icache_data_arrays_0_ext (
.RW0_addr (RW0_addr),
.RW0_en (RW0_en),
.RW0_clk (RW0_clk),
.RW0_wmode (RW0_wmode),
.RW0_wdata (RW0_wdata),
.RW0_rdata (RW0_rdata),
.RW0_wmask (RW0_wmask)
);
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
object DescribedSRAM {
def apply[T <: Data](
name: String,
desc: String,
size: BigInt, // depth
data: T
): SyncReadMem[T] = {
val mem = SyncReadMem(size, data)
mem.suggestName(name)
val granWidth = data match {
case v: Vec[_] => v.head.getWidth
case d => d.getWidth
}
val uid = 0
Annotated.srams(
component = mem,
name = name,
address_width = log2Ceil(size),
data_width = data.getWidth,
depth = size,
description = desc,
write_mask_granularity = granWidth
)
mem
}
} | module rockettile_icache_tag_array(
input [5:0] RW0_addr,
input RW0_en,
input RW0_clk,
input RW0_wmode,
input [167:0] RW0_wdata,
output [167:0] RW0_rdata,
input [7:0] RW0_wmask
);
rockettile_icache_tag_array_ext rockettile_icache_tag_array_ext (
.RW0_addr (RW0_addr),
.RW0_en (RW0_en),
.RW0_clk (RW0_clk),
.RW0_wmode (RW0_wmode),
.RW0_wdata (RW0_wdata),
.RW0_rdata (RW0_rdata),
.RW0_wmask (RW0_wmask)
);
endmodule |
Generate the Verilog code corresponding to this Chisel code //******************************************************************************
// Copyright (c) 2016 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// FDiv/FSqrt Unit
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.exu
import chisel3._
import chisel3.util._
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.tile.FPConstants._
import freechips.rocketchip.tile
import boom.v3.common._
import boom.v3.util._
import freechips.rocketchip.tile.HasFPUParameters
import freechips.rocketchip.util.uintToBitPat
/**
* Decoder for FPU divide and square root signals
*/
class UOPCodeFDivDecoder(implicit p: Parameters) extends BoomModule
with HasFPUParameters
{
val io = IO(new Bundle {
val uopc = Input(Bits(UOPC_SZ.W))
val sigs = Output(new tile.FPUCtrlSigs())
})
val N = BitPat("b0")
val Y = BitPat("b1")
val X = BitPat("b?")
val decoder = freechips.rocketchip.rocket.DecodeLogic(io.uopc,
// Note: not all of these signals are used or necessary, but we're
// constrained by the need to fit the rocket.FPU units' ctrl signals.
// swap12 fma
// | swap32 | div
// | | typeTagIn | | sqrt
// ldst | | | typeTagOut | | wflags
// | wen | | | | from_int | | |
// | | ren1 | | | | | to_int | | |
// | | | ren2 | | | | | | fast | | |
// | | | | ren3 | | | | | | | | | |
// | | | | | | | | | | | | | | | |
/* Default */ List(X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X),
Array(
BitPat(uopFDIV_S) -> List(X,X,Y,Y,X, X,X,S,S,X,X,X, X,Y,N,Y),
BitPat(uopFDIV_D) -> List(X,X,Y,Y,X, X,X,D,D,X,X,X, X,Y,N,Y),
BitPat(uopFSQRT_S) -> List(X,X,Y,N,X, X,X,S,S,X,X,X, X,N,Y,Y),
BitPat(uopFSQRT_D) -> List(X,X,Y,N,X, X,X,D,D,X,X,X, X,N,Y,Y)
): Array[(BitPat, List[BitPat])])
val s = io.sigs
val sigs = Seq(s.ldst, s.wen, s.ren1, s.ren2, s.ren3, s.swap12,
s.swap23, s.typeTagIn, s.typeTagOut, s.fromint, s.toint, s.fastpipe, s.fma,
s.div, s.sqrt, s.wflags)
s.vec := false.B
sigs zip decoder map {case(s,d) => s := d}
}
/**
* fdiv/fsqrt is douple-precision. Must upconvert inputs and downconvert outputs
* as necessary. Must wait till killed uop finishes before we're ready again.
* fdiv/fsqrt unit uses an unstable FIFO interface, and thus we must spend a
* cycle buffering up an uop to provide slack between the issue queue and the
* fdiv/fsqrt unit. FDivUnit inherents directly from FunctionalUnit, because
* UnpipelinedFunctionalUnit can only handle 1 inflight uop, whereas FDivUnit
* contains up to 2 inflight uops due to the need to buffer the input as the
* fdiv unit uses an unstable FIFO interface.
* TODO extend UnpipelinedFunctionalUnit to handle a >1 uops inflight.
*
* @param isPipelined is the functional unit pipelined
* @param numStages number of stages for the functional unit
* @param numBypassStages number of bypass stages
* @param dataWidth width of the data out of the functional unit
*/
class FDivSqrtUnit(implicit p: Parameters)
extends FunctionalUnit(
isPipelined = false,
numStages = 1,
numBypassStages = 0,
dataWidth = 65,
needsFcsr = true)
with tile.HasFPUParameters
{
//--------------------------------------
// buffer inputs and upconvert as needed
// provide a one-entry queue to store incoming uops while waiting for the fdiv/fsqrt unit to become available.
val r_buffer_val = RegInit(false.B)
val r_buffer_req = Reg(new FuncUnitReq(dataWidth=65))
val r_buffer_fin = Reg(new tile.FPInput)
val fdiv_decoder = Module(new UOPCodeFDivDecoder)
fdiv_decoder.io.uopc := io.req.bits.uop.uopc
// handle branch kill on queued entry
r_buffer_val := !IsKilledByBranch(io.brupdate, r_buffer_req.uop) && !io.req.bits.kill && r_buffer_val
r_buffer_req.uop.br_mask := GetNewBrMask(io.brupdate, r_buffer_req.uop)
// handle incoming uop, including upconversion as needed, and push back if our input queue is already occupied
io.req.ready := !r_buffer_val
def upconvert(x: UInt) = {
val s2d = Module(new hardfloat.RecFNToRecFN(inExpWidth = 8, inSigWidth = 24, outExpWidth = 11, outSigWidth = 53))
s2d.io.in := x
s2d.io.roundingMode := 0.U
s2d.io.detectTininess := DontCare
s2d.io.out
}
val in1_upconvert = upconvert(unbox(io.req.bits.rs1_data, false.B, Some(tile.FType.S)))
val in2_upconvert = upconvert(unbox(io.req.bits.rs2_data, false.B, Some(tile.FType.S)))
when (io.req.valid && !IsKilledByBranch(io.brupdate, io.req.bits.uop) && !io.req.bits.kill) {
r_buffer_val := true.B
r_buffer_req := io.req.bits
r_buffer_req.uop.br_mask := GetNewBrMask(io.brupdate, io.req.bits.uop)
r_buffer_fin.viewAsSupertype(new tile.FPUCtrlSigs) := fdiv_decoder.io.sigs
r_buffer_fin.rm := Mux(ImmGenRm(io.req.bits.uop.imm_packed) === 7.U, io.fcsr_rm, ImmGenRm(io.req.bits.uop.imm_packed))
r_buffer_fin.typ := 0.U // unused for fdivsqrt
val tag = fdiv_decoder.io.sigs.typeTagIn
r_buffer_fin.in1 := unbox(io.req.bits.rs1_data, tag, Some(tile.FType.D))
r_buffer_fin.in2 := unbox(io.req.bits.rs2_data, tag, Some(tile.FType.D))
when (tag === S) {
r_buffer_fin.in1 := in1_upconvert
r_buffer_fin.in2 := in2_upconvert
}
}
assert (!(r_buffer_val && io.req.valid), "[fdiv] a request is incoming while the buffer is already full.")
//-----------
// fdiv/fsqrt
val divsqrt = Module(new hardfloat.DivSqrtRecF64)
val r_divsqrt_val = RegInit(false.B) // inflight uop?
val r_divsqrt_killed = Reg(Bool()) // has inflight uop been killed?
val r_divsqrt_fin = Reg(new tile.FPInput)
val r_divsqrt_uop = Reg(new MicroOp)
// Need to buffer output until RF writeport is available.
val output_buffer_available = Wire(Bool())
val may_fire_input =
r_buffer_val &&
(r_buffer_fin.div || r_buffer_fin.sqrt) &&
!r_divsqrt_val &&
output_buffer_available
val divsqrt_ready = Mux(divsqrt.io.sqrtOp, divsqrt.io.inReady_sqrt, divsqrt.io.inReady_div)
divsqrt.io.inValid := may_fire_input // must be setup early
divsqrt.io.sqrtOp := r_buffer_fin.sqrt
divsqrt.io.a := r_buffer_fin.in1
divsqrt.io.b := Mux(divsqrt.io.sqrtOp, r_buffer_fin.in1, r_buffer_fin.in2)
divsqrt.io.roundingMode := r_buffer_fin.rm
divsqrt.io.detectTininess := DontCare
r_divsqrt_killed := r_divsqrt_killed || IsKilledByBranch(io.brupdate, r_divsqrt_uop) || io.req.bits.kill
r_divsqrt_uop.br_mask := GetNewBrMask(io.brupdate, r_divsqrt_uop)
when (may_fire_input && divsqrt_ready) {
// Remove entry from the input buffer.
// We don't have time to kill divsqrt request so must track if killed on entry.
r_buffer_val := false.B
r_divsqrt_val := true.B
r_divsqrt_fin := r_buffer_fin
r_divsqrt_uop := r_buffer_req.uop
r_divsqrt_killed := IsKilledByBranch(io.brupdate, r_buffer_req.uop) || io.req.bits.kill
r_divsqrt_uop.br_mask := GetNewBrMask(io.brupdate, r_buffer_req.uop)
}
//-----------------------------------------
// buffer output and down-convert as needed
val r_out_val = RegInit(false.B)
val r_out_uop = Reg(new MicroOp)
val r_out_flags_double = Reg(Bits())
val r_out_wdata_double = Reg(Bits())
output_buffer_available := !r_out_val
r_out_uop.br_mask := GetNewBrMask(io.brupdate, r_out_uop)
when (io.resp.ready || IsKilledByBranch(io.brupdate, r_out_uop) || io.req.bits.kill) {
r_out_val := false.B
}
when (divsqrt.io.outValid_div || divsqrt.io.outValid_sqrt) {
r_divsqrt_val := false.B
r_out_val := !r_divsqrt_killed && !IsKilledByBranch(io.brupdate, r_divsqrt_uop) && !io.req.bits.kill
r_out_uop := r_divsqrt_uop
r_out_uop.br_mask := GetNewBrMask(io.brupdate, r_divsqrt_uop)
r_out_wdata_double := sanitizeNaN(divsqrt.io.out, tile.FType.D)
r_out_flags_double := divsqrt.io.exceptionFlags
assert (r_divsqrt_val, "[fdiv] a response is being generated for no request.")
}
assert (!(r_out_val && (divsqrt.io.outValid_div || divsqrt.io.outValid_sqrt)),
"[fdiv] Buffered output being overwritten by another output from the fdiv/fsqrt unit.")
val downvert_d2s = Module(new hardfloat.RecFNToRecFN(
inExpWidth = 11, inSigWidth = 53, outExpWidth = 8, outSigWidth = 24))
downvert_d2s.io.in := r_out_wdata_double
downvert_d2s.io.roundingMode := r_divsqrt_fin.rm
downvert_d2s.io.detectTininess := DontCare
val out_flags = r_out_flags_double | Mux(r_divsqrt_fin.typeTagIn === S, downvert_d2s.io.exceptionFlags, 0.U)
io.resp.valid := r_out_val && !IsKilledByBranch(io.brupdate, r_out_uop)
io.resp.bits.uop := r_out_uop
io.resp.bits.data :=
Mux(r_divsqrt_fin.typeTagIn === S,
box(downvert_d2s.io.out, false.B),
box(r_out_wdata_double, true.B))
io.resp.bits.fflags.valid := io.resp.valid
io.resp.bits.fflags.bits.uop := r_out_uop
io.resp.bits.fflags.bits.uop.br_mask := GetNewBrMask(io.brupdate, r_out_uop)
io.resp.bits.fflags.bits.flags := out_flags
} | module UOPCodeFDivDecoder(
input [6:0] io_uopc,
output [1:0] io_sigs_typeTagIn,
output io_sigs_div,
output io_sigs_sqrt
);
wire [4:0] decoder_decoded_invInputs = ~(io_uopc[4:0]);
wire [6:0] _decoder_decoded_andMatrixOutputs_T_1 = {decoder_decoded_invInputs[0], io_uopc[1], io_uopc[2], decoder_decoded_invInputs[3], decoder_decoded_invInputs[4], io_uopc[5], io_uopc[6]};
wire [3:0] _decoder_decoded_andMatrixOutputs_T_3 = {io_uopc[3], decoder_decoded_invInputs[4], io_uopc[5], io_uopc[6]};
assign io_sigs_typeTagIn = {1'h0, |{&_decoder_decoded_andMatrixOutputs_T_1, &_decoder_decoded_andMatrixOutputs_T_3}};
assign io_sigs_div = |{&{decoder_decoded_invInputs[1], io_uopc[2], decoder_decoded_invInputs[3], decoder_decoded_invInputs[4], io_uopc[5], io_uopc[6]}, &_decoder_decoded_andMatrixOutputs_T_1};
assign io_sigs_sqrt = |{&{io_uopc[0], io_uopc[1], io_uopc[2], decoder_decoded_invInputs[3], decoder_decoded_invInputs[4], io_uopc[5], io_uopc[6]}, &_decoder_decoded_andMatrixOutputs_T_3};
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.jtag for license details.
package freechips.rocketchip.jtag
import chisel3._
import chisel3.reflect.DataMirror
import chisel3.internal.firrtl.KnownWidth
import chisel3.util.{Cat, Valid}
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util.property
/** Base JTAG shifter IO, viewed from input to shift register chain.
* Can be chained together.
*/
class ShifterIO extends Bundle {
val shift = Bool() // advance the scan chain on clock high
val data = Bool() // as input: bit to be captured into shifter MSB on next rising edge; as output: value of shifter LSB
val capture = Bool() // high in the CaptureIR/DR state when this chain is selected
val update = Bool() // high in the UpdateIR/DR state when this chain is selected
/** Sets a output shifter IO's control signals from a input shifter IO's control signals.
*/
def chainControlFrom(in: ShifterIO): Unit = {
shift := in.shift
capture := in.capture
update := in.update
}
}
trait ChainIO extends Bundle {
val chainIn = Input(new ShifterIO)
val chainOut = Output(new ShifterIO)
}
class Capture[+T <: Data](gen: T) extends Bundle {
val bits = Input(gen) // data to capture, should be always valid
val capture = Output(Bool()) // will be high in capture state (single cycle), captured on following rising edge
}
object Capture {
def apply[T <: Data](gen: T): Capture[T] = new Capture(gen)
}
/** Trait that all JTAG chains (data and instruction registers) must extend, providing basic chain
* IO.
*/
trait Chain extends Module {
val io: ChainIO
}
/** One-element shift register, data register for bypass mode.
*
* Implements Clause 10.
*/
class JtagBypassChain(implicit val p: Parameters) extends Chain {
class ModIO extends ChainIO
val io = IO(new ModIO)
io.chainOut chainControlFrom io.chainIn
val reg = Reg(Bool()) // 10.1.1a single shift register stage
io.chainOut.data := reg
property.cover(io.chainIn.capture, "bypass_chain_capture", "JTAG; bypass_chain_capture; This Bypass Chain captured data")
when (io.chainIn.capture) {
reg := false.B // 10.1.1b capture logic 0 on TCK rising
} .elsewhen (io.chainIn.shift) {
reg := io.chainIn.data
}
assert(!(io.chainIn.capture && io.chainIn.update)
&& !(io.chainIn.capture && io.chainIn.shift)
&& !(io.chainIn.update && io.chainIn.shift))
}
object JtagBypassChain {
def apply()(implicit p: Parameters) = new JtagBypassChain
}
/** Simple shift register with parallel capture only, for read-only data registers.
*
* Number of stages is the number of bits in gen, which must have a known width.
*
* Useful notes:
* 7.2.1c shifter shifts on TCK rising edge
* 4.3.2a TDI captured on TCK rising edge, 6.1.2.1b assumed changes on TCK falling edge
*/
class CaptureChain[+T <: Data](gen: T)(implicit val p: Parameters) extends Chain {
override def desiredName = s"CaptureChain_${gen.typeName}"
class ModIO extends ChainIO {
val capture = Capture(gen)
}
val io = IO(new ModIO)
io.chainOut chainControlFrom io.chainIn
val n = DataMirror.widthOf(gen) match {
case KnownWidth(x) => x
case _ => require(false, s"can't generate chain for unknown width data type $gen"); -1 // TODO: remove -1 type hack
}
val regs = (0 until n) map (x => Reg(Bool()))
io.chainOut.data := regs(0)
property.cover(io.chainIn.capture, "chain_capture", "JTAG; chain_capture; This Chain captured data")
when (io.chainIn.capture) {
(0 until n) map (x => regs(x) := io.capture.bits.asUInt(x))
io.capture.capture := true.B
} .elsewhen (io.chainIn.shift) {
regs(n-1) := io.chainIn.data
(0 until n-1) map (x => regs(x) := regs(x+1))
io.capture.capture := false.B
} .otherwise {
io.capture.capture := false.B
}
assert(!(io.chainIn.capture && io.chainIn.update)
&& !(io.chainIn.capture && io.chainIn.shift)
&& !(io.chainIn.update && io.chainIn.shift))
}
object CaptureChain {
def apply[T <: Data](gen: T)(implicit p: Parameters) = new CaptureChain(gen)
}
/** Simple shift register with parallel capture and update. Useful for general instruction and data
* scan registers.
*
* Number of stages is the max number of bits in genCapture and genUpdate, both of which must have
* known widths. If there is a width mismatch, the unused most significant bits will be zero.
*
* Useful notes:
* 7.2.1c shifter shifts on TCK rising edge
* 4.3.2a TDI captured on TCK rising edge, 6.1.2.1b assumed changes on TCK falling edge
*/
class CaptureUpdateChain[+T <: Data, +V <: Data](genCapture: T, genUpdate: V)(implicit val p: Parameters) extends Chain {
override def desiredName = s"CaptureUpdateChain_${genCapture.typeName}_To_${genUpdate.typeName}"
class ModIO extends ChainIO {
val capture = Capture(genCapture)
val update = Valid(genUpdate) // valid high when in update state (single cycle), contents may change any time after
}
val io = IO(new ModIO)
io.chainOut chainControlFrom io.chainIn
val captureWidth = DataMirror.widthOf(genCapture) match {
case KnownWidth(x) => x
case _ => require(false, s"can't generate chain for unknown width data type $genCapture"); -1 // TODO: remove -1 type hack
}
val updateWidth = DataMirror.widthOf(genUpdate) match {
case KnownWidth(x) => x
case _ => require(false, s"can't generate chain for unknown width data type $genUpdate"); -1 // TODO: remove -1 type hack
}
val n = math.max(captureWidth, updateWidth)
val regs = (0 until n) map (x => Reg(Bool()))
io.chainOut.data := regs(0)
val updateBits = Cat(regs.reverse)(updateWidth-1, 0)
io.update.bits := updateBits.asTypeOf(io.update.bits)
val captureBits = io.capture.bits.asUInt
property.cover(io.chainIn.capture, "chain_capture", "JTAG;chain_capture; This Chain captured data")
property.cover(io.chainIn.capture, "chain_update", "JTAG;chain_update; This Chain updated data")
when (io.chainIn.capture) {
(0 until math.min(n, captureWidth)) map (x => regs(x) := captureBits(x))
(captureWidth until n) map (x => regs(x) := 0.U)
io.capture.capture := true.B
io.update.valid := false.B
} .elsewhen (io.chainIn.update) {
io.capture.capture := false.B
io.update.valid := true.B
} .elsewhen (io.chainIn.shift) {
regs(n-1) := io.chainIn.data
(0 until n-1) map (x => regs(x) := regs(x+1))
io.capture.capture := false.B
io.update.valid := false.B
} .otherwise {
io.capture.capture := false.B
io.update.valid := false.B
}
assert(!(io.chainIn.capture && io.chainIn.update)
&& !(io.chainIn.capture && io.chainIn.shift)
&& !(io.chainIn.update && io.chainIn.shift))
}
object CaptureUpdateChain {
/** Capture-update chain with matching capture and update types.
*/
def apply[T <: Data](gen: T)(implicit p: Parameters) = new CaptureUpdateChain(gen, gen)
def apply[T <: Data, V <: Data](genCapture: T, genUpdate: V)(implicit p: Parameters) =
new CaptureUpdateChain(genCapture, genUpdate)
} | module CaptureUpdateChain_DTMInfo_To_DTMInfo(
input clock,
input reset,
input io_chainIn_shift,
input io_chainIn_data,
input io_chainIn_capture,
input io_chainIn_update,
output io_chainOut_data,
input [1:0] io_capture_bits_dmiStatus,
output io_update_valid,
output io_update_bits_dmireset
);
reg regs_0;
reg regs_1;
reg regs_2;
reg regs_3;
reg regs_4;
reg regs_5;
reg regs_6;
reg regs_7;
reg regs_8;
reg regs_9;
reg regs_10;
reg regs_11;
reg regs_12;
reg regs_13;
reg regs_14;
reg regs_15;
reg regs_16;
reg regs_17;
reg regs_18;
reg regs_19;
reg regs_20;
reg regs_21;
reg regs_22;
reg regs_23;
reg regs_24;
reg regs_25;
reg regs_26;
reg regs_27;
reg regs_28;
reg regs_29;
reg regs_30;
reg regs_31;
wire _GEN = io_chainIn_update | ~io_chainIn_shift;
always @(posedge clock) begin
regs_0 <= io_chainIn_capture | (_GEN ? regs_0 : regs_1);
regs_1 <= ~io_chainIn_capture & (_GEN ? regs_1 : regs_2);
regs_2 <= ~io_chainIn_capture & (_GEN ? regs_2 : regs_3);
regs_3 <= ~io_chainIn_capture & (_GEN ? regs_3 : regs_4);
regs_4 <= io_chainIn_capture | (_GEN ? regs_4 : regs_5);
regs_5 <= io_chainIn_capture | (_GEN ? regs_5 : regs_6);
regs_6 <= io_chainIn_capture | (_GEN ? regs_6 : regs_7);
regs_7 <= ~io_chainIn_capture & (_GEN ? regs_7 : regs_8);
regs_8 <= ~io_chainIn_capture & (_GEN ? regs_8 : regs_9);
regs_9 <= ~io_chainIn_capture & (_GEN ? regs_9 : regs_10);
if (io_chainIn_capture) begin
regs_10 <= io_capture_bits_dmiStatus[0];
regs_11 <= io_capture_bits_dmiStatus[1];
end
else if (_GEN) begin
end
else begin
regs_10 <= regs_11;
regs_11 <= regs_12;
end
regs_12 <= io_chainIn_capture | (_GEN ? regs_12 : regs_13);
regs_13 <= ~io_chainIn_capture & (_GEN ? regs_13 : regs_14);
regs_14 <= io_chainIn_capture | (_GEN ? regs_14 : regs_15);
regs_15 <= ~io_chainIn_capture & (_GEN ? regs_15 : regs_16);
regs_16 <= ~io_chainIn_capture & (_GEN ? regs_16 : regs_17);
regs_17 <= ~io_chainIn_capture & (_GEN ? regs_17 : regs_18);
regs_18 <= ~io_chainIn_capture & (_GEN ? regs_18 : regs_19);
regs_19 <= ~io_chainIn_capture & (_GEN ? regs_19 : regs_20);
regs_20 <= ~io_chainIn_capture & (_GEN ? regs_20 : regs_21);
regs_21 <= ~io_chainIn_capture & (_GEN ? regs_21 : regs_22);
regs_22 <= ~io_chainIn_capture & (_GEN ? regs_22 : regs_23);
regs_23 <= ~io_chainIn_capture & (_GEN ? regs_23 : regs_24);
regs_24 <= ~io_chainIn_capture & (_GEN ? regs_24 : regs_25);
regs_25 <= ~io_chainIn_capture & (_GEN ? regs_25 : regs_26);
regs_26 <= ~io_chainIn_capture & (_GEN ? regs_26 : regs_27);
regs_27 <= ~io_chainIn_capture & (_GEN ? regs_27 : regs_28);
regs_28 <= ~io_chainIn_capture & (_GEN ? regs_28 : regs_29);
regs_29 <= ~io_chainIn_capture & (_GEN ? regs_29 : regs_30);
regs_30 <= ~io_chainIn_capture & (_GEN ? regs_30 : regs_31);
regs_31 <= ~io_chainIn_capture & (_GEN ? regs_31 : io_chainIn_data);
end
assign io_chainOut_data = regs_0;
assign io_update_valid = ~io_chainIn_capture & io_chainIn_update;
assign io_update_bits_dmireset = regs_16;
endmodule |
Generate the Verilog code corresponding to this Chisel code /*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN_interIo(expWidth: Int, sigWidth: Int) extends Bundle
{
//*** ENCODE SOME OF THESE CASES IN FEWER BITS?:
val isSigNaNAny = Bool()
val isNaNAOrB = Bool()
val isInfA = Bool()
val isZeroA = Bool()
val isInfB = Bool()
val isZeroB = Bool()
val signProd = Bool()
val isNaNC = Bool()
val isInfC = Bool()
val isZeroC = Bool()
val sExpSum = SInt((expWidth + 2).W)
val doSubMags = Bool()
val CIsDominant = Bool()
val CDom_CAlignDist = UInt(log2Ceil(sigWidth + 1).W)
val highAlignedSigC = UInt((sigWidth + 2).W)
val bit0AlignedSigC = UInt(1.W)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_preMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_preMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val mulAddA = Output(UInt(sigWidth.W))
val mulAddB = Output(UInt(sigWidth.W))
val mulAddC = Output(UInt((sigWidth * 2).W))
val toPostMul = Output(new MulAddRecFN_interIo(expWidth, sigWidth))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
//*** POSSIBLE TO REDUCE THIS BY 1 OR 2 BITS? (CURRENTLY 2 BITS BETWEEN
//*** UNSHIFTED C AND PRODUCT):
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a)
val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b)
val rawC = rawFloatFromRecFN(expWidth, sigWidth, io.c)
val signProd = rawA.sign ^ rawB.sign ^ io.op(1)
//*** REVIEW THE BIAS FOR 'sExpAlignedProd':
val sExpAlignedProd =
rawA.sExp +& rawB.sExp + (-(BigInt(1)<<expWidth) + sigWidth + 3).S
val doSubMags = signProd ^ rawC.sign ^ io.op(0)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sNatCAlignDist = sExpAlignedProd - rawC.sExp
val posNatCAlignDist = sNatCAlignDist(expWidth + 1, 0)
val isMinCAlign = rawA.isZero || rawB.isZero || (sNatCAlignDist < 0.S)
val CIsDominant =
! rawC.isZero && (isMinCAlign || (posNatCAlignDist <= sigWidth.U))
val CAlignDist =
Mux(isMinCAlign,
0.U,
Mux(posNatCAlignDist < (sigSumWidth - 1).U,
posNatCAlignDist(log2Ceil(sigSumWidth) - 1, 0),
(sigSumWidth - 1).U
)
)
val mainAlignedSigC =
(Mux(doSubMags, ~rawC.sig, rawC.sig) ## Fill(sigSumWidth - sigWidth + 2, doSubMags)).asSInt>>CAlignDist
val reduced4CExtra =
(orReduceBy4(rawC.sig<<((sigSumWidth - sigWidth - 1) & 3)) &
lowMask(
CAlignDist>>2,
//*** NOT NEEDED?:
// (sigSumWidth + 2)>>2,
(sigSumWidth - 1)>>2,
(sigSumWidth - sigWidth - 1)>>2
)
).orR
val alignedSigC =
Cat(mainAlignedSigC>>3,
Mux(doSubMags,
mainAlignedSigC(2, 0).andR && ! reduced4CExtra,
mainAlignedSigC(2, 0).orR || reduced4CExtra
)
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
io.mulAddA := rawA.sig
io.mulAddB := rawB.sig
io.mulAddC := alignedSigC(sigWidth * 2, 1)
io.toPostMul.isSigNaNAny :=
isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) ||
isSigNaNRawFloat(rawC)
io.toPostMul.isNaNAOrB := rawA.isNaN || rawB.isNaN
io.toPostMul.isInfA := rawA.isInf
io.toPostMul.isZeroA := rawA.isZero
io.toPostMul.isInfB := rawB.isInf
io.toPostMul.isZeroB := rawB.isZero
io.toPostMul.signProd := signProd
io.toPostMul.isNaNC := rawC.isNaN
io.toPostMul.isInfC := rawC.isInf
io.toPostMul.isZeroC := rawC.isZero
io.toPostMul.sExpSum :=
Mux(CIsDominant, rawC.sExp, sExpAlignedProd - sigWidth.S)
io.toPostMul.doSubMags := doSubMags
io.toPostMul.CIsDominant := CIsDominant
io.toPostMul.CDom_CAlignDist := CAlignDist(log2Ceil(sigWidth + 1) - 1, 0)
io.toPostMul.highAlignedSigC :=
alignedSigC(sigSumWidth - 1, sigWidth * 2 + 1)
io.toPostMul.bit0AlignedSigC := alignedSigC(0)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_postMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_postMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val fromPreMul = Input(new MulAddRecFN_interIo(expWidth, sigWidth))
val mulAddResult = Input(UInt((sigWidth * 2 + 1).W))
val roundingMode = Input(UInt(3.W))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_min = (io.roundingMode === round_min)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val opSignC = io.fromPreMul.signProd ^ io.fromPreMul.doSubMags
val sigSum =
Cat(Mux(io.mulAddResult(sigWidth * 2),
io.fromPreMul.highAlignedSigC + 1.U,
io.fromPreMul.highAlignedSigC
),
io.mulAddResult(sigWidth * 2 - 1, 0),
io.fromPreMul.bit0AlignedSigC
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val CDom_sign = opSignC
val CDom_sExp = io.fromPreMul.sExpSum - io.fromPreMul.doSubMags.zext
val CDom_absSigSum =
Mux(io.fromPreMul.doSubMags,
~sigSum(sigSumWidth - 1, sigWidth + 1),
0.U(1.W) ##
//*** IF GAP IS REDUCED TO 1 BIT, MUST REDUCE THIS COMPONENT TO 1 BIT TOO:
io.fromPreMul.highAlignedSigC(sigWidth + 1, sigWidth) ##
sigSum(sigSumWidth - 3, sigWidth + 2)
)
val CDom_absSigSumExtra =
Mux(io.fromPreMul.doSubMags,
(~sigSum(sigWidth, 1)).orR,
sigSum(sigWidth + 1, 1).orR
)
val CDom_mainSig =
(CDom_absSigSum<<io.fromPreMul.CDom_CAlignDist)(
sigWidth * 2 + 1, sigWidth - 3)
val CDom_reduced4SigExtra =
(orReduceBy4(CDom_absSigSum(sigWidth - 1, 0)<<(~sigWidth & 3)) &
lowMask(io.fromPreMul.CDom_CAlignDist>>2, 0, sigWidth>>2)).orR
val CDom_sig =
Cat(CDom_mainSig>>3,
CDom_mainSig(2, 0).orR || CDom_reduced4SigExtra ||
CDom_absSigSumExtra
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notCDom_signSigSum = sigSum(sigWidth * 2 + 3)
val notCDom_absSigSum =
Mux(notCDom_signSigSum,
~sigSum(sigWidth * 2 + 2, 0),
sigSum(sigWidth * 2 + 2, 0) + io.fromPreMul.doSubMags
)
val notCDom_reduced2AbsSigSum = orReduceBy2(notCDom_absSigSum)
val notCDom_normDistReduced2 = countLeadingZeros(notCDom_reduced2AbsSigSum)
val notCDom_nearNormDist = notCDom_normDistReduced2<<1
val notCDom_sExp = io.fromPreMul.sExpSum - notCDom_nearNormDist.asUInt.zext
val notCDom_mainSig =
(notCDom_absSigSum<<notCDom_nearNormDist)(
sigWidth * 2 + 3, sigWidth - 1)
val notCDom_reduced4SigExtra =
(orReduceBy2(
notCDom_reduced2AbsSigSum(sigWidth>>1, 0)<<((sigWidth>>1) & 1)) &
lowMask(notCDom_normDistReduced2>>1, 0, (sigWidth + 2)>>2)
).orR
val notCDom_sig =
Cat(notCDom_mainSig>>3,
notCDom_mainSig(2, 0).orR || notCDom_reduced4SigExtra
)
val notCDom_completeCancellation =
(notCDom_sig(sigWidth + 2, sigWidth + 1) === 0.U)
val notCDom_sign =
Mux(notCDom_completeCancellation,
roundingMode_min,
io.fromPreMul.signProd ^ notCDom_signSigSum
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notNaN_isInfProd = io.fromPreMul.isInfA || io.fromPreMul.isInfB
val notNaN_isInfOut = notNaN_isInfProd || io.fromPreMul.isInfC
val notNaN_addZeros =
(io.fromPreMul.isZeroA || io.fromPreMul.isZeroB) &&
io.fromPreMul.isZeroC
io.invalidExc :=
io.fromPreMul.isSigNaNAny ||
(io.fromPreMul.isInfA && io.fromPreMul.isZeroB) ||
(io.fromPreMul.isZeroA && io.fromPreMul.isInfB) ||
(! io.fromPreMul.isNaNAOrB &&
(io.fromPreMul.isInfA || io.fromPreMul.isInfB) &&
io.fromPreMul.isInfC &&
io.fromPreMul.doSubMags)
io.rawOut.isNaN := io.fromPreMul.isNaNAOrB || io.fromPreMul.isNaNC
io.rawOut.isInf := notNaN_isInfOut
//*** IMPROVE?:
io.rawOut.isZero :=
notNaN_addZeros ||
(! io.fromPreMul.CIsDominant && notCDom_completeCancellation)
io.rawOut.sign :=
(notNaN_isInfProd && io.fromPreMul.signProd) ||
(io.fromPreMul.isInfC && opSignC) ||
(notNaN_addZeros && ! roundingMode_min &&
io.fromPreMul.signProd && opSignC) ||
(notNaN_addZeros && roundingMode_min &&
(io.fromPreMul.signProd || opSignC)) ||
(! notNaN_isInfOut && ! notNaN_addZeros &&
Mux(io.fromPreMul.CIsDominant, CDom_sign, notCDom_sign))
io.rawOut.sExp := Mux(io.fromPreMul.CIsDominant, CDom_sExp, notCDom_sExp)
io.rawOut.sig := Mux(io.fromPreMul.CIsDominant, CDom_sig, notCDom_sig)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul =
Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul =
Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
mulAddRecFNToRaw_postMul.io.fromPreMul :=
mulAddRecFNToRaw_preMul.io.toPostMul
mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult
mulAddRecFNToRaw_postMul.io.roundingMode := io.roundingMode
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulAddRecFNToRaw_postMul.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulAddRecFNToRaw_postMul.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
} | module MulAddRecFNToRaw_preMul_e8_s24(
input [1:0] io_op,
input [32:0] io_a,
input [32:0] io_b,
input [32:0] io_c,
output [23:0] io_mulAddA,
output [23:0] io_mulAddB,
output [47:0] io_mulAddC,
output io_toPostMul_isSigNaNAny,
output io_toPostMul_isNaNAOrB,
output io_toPostMul_isInfA,
output io_toPostMul_isZeroA,
output io_toPostMul_isInfB,
output io_toPostMul_isZeroB,
output io_toPostMul_signProd,
output io_toPostMul_isNaNC,
output io_toPostMul_isInfC,
output io_toPostMul_isZeroC,
output [9:0] io_toPostMul_sExpSum,
output io_toPostMul_doSubMags,
output io_toPostMul_CIsDominant,
output [4:0] io_toPostMul_CDom_CAlignDist,
output [25:0] io_toPostMul_highAlignedSigC,
output io_toPostMul_bit0AlignedSigC
);
wire rawA_isNaN = (&(io_a[31:30])) & io_a[29];
wire rawB_isNaN = (&(io_b[31:30])) & io_b[29];
wire rawC_isNaN = (&(io_c[31:30])) & io_c[29];
wire signProd = io_a[32] ^ io_b[32] ^ io_op[1];
wire [10:0] _sExpAlignedProd_T_1 = {2'h0, io_a[31:23]} + {2'h0, io_b[31:23]} - 11'hE5;
wire doSubMags = signProd ^ io_c[32] ^ io_op[0];
wire [10:0] _sNatCAlignDist_T = _sExpAlignedProd_T_1 - {2'h0, io_c[31:23]};
wire isMinCAlign = ~(|(io_a[31:29])) | ~(|(io_b[31:29])) | $signed(_sNatCAlignDist_T) < 11'sh0;
wire CIsDominant = (|(io_c[31:29])) & (isMinCAlign | _sNatCAlignDist_T[9:0] < 10'h19);
wire [6:0] CAlignDist = isMinCAlign ? 7'h0 : _sNatCAlignDist_T[9:0] < 10'h4A ? _sNatCAlignDist_T[6:0] : 7'h4A;
wire [77:0] mainAlignedSigC = $signed($signed({doSubMags ? {1'h1, ~(|(io_c[31:29])), ~(io_c[22:0])} : {1'h0, |(io_c[31:29]), io_c[22:0]}, {53{doSubMags}}}) >>> CAlignDist);
wire [32:0] reduced4CExtra_shift = $signed(33'sh100000000 >>> CAlignDist[6:2]);
wire [5:0] _GEN = {|(io_c[21:18]), |(io_c[17:14]), |(io_c[13:10]), |(io_c[9:6]), |(io_c[5:2]), |(io_c[1:0])} & {reduced4CExtra_shift[14], reduced4CExtra_shift[15], reduced4CExtra_shift[16], reduced4CExtra_shift[17], reduced4CExtra_shift[18], reduced4CExtra_shift[19]};
assign io_mulAddA = {|(io_a[31:29]), io_a[22:0]};
assign io_mulAddB = {|(io_b[31:29]), io_b[22:0]};
assign io_mulAddC = mainAlignedSigC[50:3];
assign io_toPostMul_isSigNaNAny = rawA_isNaN & ~(io_a[22]) | rawB_isNaN & ~(io_b[22]) | rawC_isNaN & ~(io_c[22]);
assign io_toPostMul_isNaNAOrB = rawA_isNaN | rawB_isNaN;
assign io_toPostMul_isInfA = (&(io_a[31:30])) & ~(io_a[29]);
assign io_toPostMul_isZeroA = ~(|(io_a[31:29]));
assign io_toPostMul_isInfB = (&(io_b[31:30])) & ~(io_b[29]);
assign io_toPostMul_isZeroB = ~(|(io_b[31:29]));
assign io_toPostMul_signProd = signProd;
assign io_toPostMul_isNaNC = rawC_isNaN;
assign io_toPostMul_isInfC = (&(io_c[31:30])) & ~(io_c[29]);
assign io_toPostMul_isZeroC = ~(|(io_c[31:29]));
assign io_toPostMul_sExpSum = CIsDominant ? {1'h0, io_c[31:23]} : _sExpAlignedProd_T_1[9:0] - 10'h18;
assign io_toPostMul_doSubMags = doSubMags;
assign io_toPostMul_CIsDominant = CIsDominant;
assign io_toPostMul_CDom_CAlignDist = CAlignDist[4:0];
assign io_toPostMul_highAlignedSigC = mainAlignedSigC[76:51];
assign io_toPostMul_bit0AlignedSigC = doSubMags ? (&(mainAlignedSigC[2:0])) & _GEN == 6'h0 : (|{mainAlignedSigC[2:0], _GEN});
endmodule |
Generate the Verilog code corresponding to this Chisel code package boom.v3.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import boom.v3.common._
import boom.v3.util.{BoomCoreStringPrefix, MaskLower, WrapInc}
import scala.math.min
class TageResp extends Bundle {
val ctr = UInt(3.W)
val u = UInt(2.W)
}
class TageTable(val nRows: Int, val tagSz: Int, val histLength: Int, val uBitPeriod: Int)
(implicit p: Parameters) extends BoomModule()(p)
with HasBoomFrontendParameters
{
require(histLength <= globalHistoryLength)
val nWrBypassEntries = 2
val io = IO( new Bundle {
val f1_req_valid = Input(Bool())
val f1_req_pc = Input(UInt(vaddrBitsExtended.W))
val f1_req_ghist = Input(UInt(globalHistoryLength.W))
val f3_resp = Output(Vec(bankWidth, Valid(new TageResp)))
val update_mask = Input(Vec(bankWidth, Bool()))
val update_taken = Input(Vec(bankWidth, Bool()))
val update_alloc = Input(Vec(bankWidth, Bool()))
val update_old_ctr = Input(Vec(bankWidth, UInt(3.W)))
val update_pc = Input(UInt())
val update_hist = Input(UInt())
val update_u_mask = Input(Vec(bankWidth, Bool()))
val update_u = Input(Vec(bankWidth, UInt(2.W)))
})
def compute_folded_hist(hist: UInt, l: Int) = {
val nChunks = (histLength + l - 1) / l
val hist_chunks = (0 until nChunks) map {i =>
hist(min((i+1)*l, histLength)-1, i*l)
}
hist_chunks.reduce(_^_)
}
def compute_tag_and_hash(unhashed_idx: UInt, hist: UInt) = {
val idx_history = compute_folded_hist(hist, log2Ceil(nRows))
val idx = (unhashed_idx ^ idx_history)(log2Ceil(nRows)-1,0)
val tag_history = compute_folded_hist(hist, tagSz)
val tag = ((unhashed_idx >> log2Ceil(nRows)) ^ tag_history)(tagSz-1,0)
(idx, tag)
}
def inc_ctr(ctr: UInt, taken: Bool): UInt = {
Mux(!taken, Mux(ctr === 0.U, 0.U, ctr - 1.U),
Mux(ctr === 7.U, 7.U, ctr + 1.U))
}
val doing_reset = RegInit(true.B)
val reset_idx = RegInit(0.U(log2Ceil(nRows).W))
reset_idx := reset_idx + doing_reset
when (reset_idx === (nRows-1).U) { doing_reset := false.B }
class TageEntry extends Bundle {
val valid = Bool() // TODO: Remove this valid bit
val tag = UInt(tagSz.W)
val ctr = UInt(3.W)
}
val tageEntrySz = 1 + tagSz + 3
val (s1_hashed_idx, s1_tag) = compute_tag_and_hash(fetchIdx(io.f1_req_pc), io.f1_req_ghist)
val hi_us = SyncReadMem(nRows, Vec(bankWidth, Bool()))
val lo_us = SyncReadMem(nRows, Vec(bankWidth, Bool()))
val table = SyncReadMem(nRows, Vec(bankWidth, UInt(tageEntrySz.W)))
val mems = Seq((f"tage_l$histLength", nRows, bankWidth * tageEntrySz))
val s2_tag = RegNext(s1_tag)
val s2_req_rtage = VecInit(table.read(s1_hashed_idx, io.f1_req_valid).map(_.asTypeOf(new TageEntry)))
val s2_req_rhius = hi_us.read(s1_hashed_idx, io.f1_req_valid)
val s2_req_rlous = lo_us.read(s1_hashed_idx, io.f1_req_valid)
val s2_req_rhits = VecInit(s2_req_rtage.map(e => e.valid && e.tag === s2_tag && !doing_reset))
for (w <- 0 until bankWidth) {
// This bit indicates the TAGE table matched here
io.f3_resp(w).valid := RegNext(s2_req_rhits(w))
io.f3_resp(w).bits.u := RegNext(Cat(s2_req_rhius(w), s2_req_rlous(w)))
io.f3_resp(w).bits.ctr := RegNext(s2_req_rtage(w).ctr)
}
val clear_u_ctr = RegInit(0.U((log2Ceil(uBitPeriod) + log2Ceil(nRows) + 1).W))
when (doing_reset) { clear_u_ctr := 1.U } .otherwise { clear_u_ctr := clear_u_ctr + 1.U }
val doing_clear_u = clear_u_ctr(log2Ceil(uBitPeriod)-1,0) === 0.U
val doing_clear_u_hi = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 1.U
val doing_clear_u_lo = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 0.U
val clear_u_idx = clear_u_ctr >> log2Ceil(uBitPeriod)
val (update_idx, update_tag) = compute_tag_and_hash(fetchIdx(io.update_pc), io.update_hist)
val update_wdata = Wire(Vec(bankWidth, new TageEntry))
table.write(
Mux(doing_reset, reset_idx , update_idx),
Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 0.U(tageEntrySz.W) }), VecInit(update_wdata.map(_.asUInt))),
Mux(doing_reset, ~(0.U(bankWidth.W)) , io.update_mask.asUInt).asBools
)
val update_hi_wdata = Wire(Vec(bankWidth, Bool()))
hi_us.write(
Mux(doing_reset, reset_idx, Mux(doing_clear_u_hi, clear_u_idx, update_idx)),
Mux(doing_reset || doing_clear_u_hi, VecInit((0.U(bankWidth.W)).asBools), update_hi_wdata),
Mux(doing_reset || doing_clear_u_hi, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools
)
val update_lo_wdata = Wire(Vec(bankWidth, Bool()))
lo_us.write(
Mux(doing_reset, reset_idx, Mux(doing_clear_u_lo, clear_u_idx, update_idx)),
Mux(doing_reset || doing_clear_u_lo, VecInit((0.U(bankWidth.W)).asBools), update_lo_wdata),
Mux(doing_reset || doing_clear_u_lo, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools
)
val wrbypass_tags = Reg(Vec(nWrBypassEntries, UInt(tagSz.W)))
val wrbypass_idxs = Reg(Vec(nWrBypassEntries, UInt(log2Ceil(nRows).W)))
val wrbypass = Reg(Vec(nWrBypassEntries, Vec(bankWidth, UInt(3.W))))
val wrbypass_enq_idx = RegInit(0.U(log2Ceil(nWrBypassEntries).W))
val wrbypass_hits = VecInit((0 until nWrBypassEntries) map { i =>
!doing_reset &&
wrbypass_tags(i) === update_tag &&
wrbypass_idxs(i) === update_idx
})
val wrbypass_hit = wrbypass_hits.reduce(_||_)
val wrbypass_hit_idx = PriorityEncoder(wrbypass_hits)
for (w <- 0 until bankWidth) {
update_wdata(w).ctr := Mux(io.update_alloc(w),
Mux(io.update_taken(w), 4.U,
3.U
),
Mux(wrbypass_hit, inc_ctr(wrbypass(wrbypass_hit_idx)(w), io.update_taken(w)),
inc_ctr(io.update_old_ctr(w), io.update_taken(w))
)
)
update_wdata(w).valid := true.B
update_wdata(w).tag := update_tag
update_hi_wdata(w) := io.update_u(w)(1)
update_lo_wdata(w) := io.update_u(w)(0)
}
when (io.update_mask.reduce(_||_)) {
when (wrbypass_hits.reduce(_||_)) {
wrbypass(wrbypass_hit_idx) := VecInit(update_wdata.map(_.ctr))
} .otherwise {
wrbypass (wrbypass_enq_idx) := VecInit(update_wdata.map(_.ctr))
wrbypass_tags(wrbypass_enq_idx) := update_tag
wrbypass_idxs(wrbypass_enq_idx) := update_idx
wrbypass_enq_idx := WrapInc(wrbypass_enq_idx, nWrBypassEntries)
}
}
}
case class BoomTageParams(
// nSets, histLen, tagSz
tableInfo: Seq[Tuple3[Int, Int, Int]] = Seq(( 128, 2, 7),
( 128, 4, 7),
( 256, 8, 8),
( 256, 16, 8),
( 128, 32, 9),
( 128, 64, 9)),
uBitPeriod: Int = 2048
)
class TageBranchPredictorBank(params: BoomTageParams = BoomTageParams())(implicit p: Parameters) extends BranchPredictorBank()(p)
{
val tageUBitPeriod = params.uBitPeriod
val tageNTables = params.tableInfo.size
class TageMeta extends Bundle
{
val provider = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
val alt_differs = Vec(bankWidth, Output(Bool()))
val provider_u = Vec(bankWidth, Output(UInt(2.W)))
val provider_ctr = Vec(bankWidth, Output(UInt(3.W)))
val allocate = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
}
val f3_meta = Wire(new TageMeta)
override val metaSz = f3_meta.asUInt.getWidth
require(metaSz <= bpdMaxMetaLength)
def inc_u(u: UInt, alt_differs: Bool, mispredict: Bool): UInt = {
Mux(!alt_differs, u,
Mux(mispredict, Mux(u === 0.U, 0.U, u - 1.U),
Mux(u === 3.U, 3.U, u + 1.U)))
}
val tt = params.tableInfo map {
case (n, l, s) => {
val t = Module(new TageTable(n, s, l, params.uBitPeriod))
t.io.f1_req_valid := RegNext(io.f0_valid)
t.io.f1_req_pc := RegNext(io.f0_pc)
t.io.f1_req_ghist := io.f1_ghist
(t, t.mems)
}
}
val tables = tt.map(_._1)
val mems = tt.map(_._2).flatten
val f3_resps = VecInit(tables.map(_.io.f3_resp))
val s1_update_meta = s1_update.bits.meta.asTypeOf(new TageMeta)
val s1_update_mispredict_mask = UIntToOH(s1_update.bits.cfi_idx.bits) &
Fill(bankWidth, s1_update.bits.cfi_mispredicted)
val s1_update_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, Bool()))))
val s1_update_u_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, UInt(1.W)))))
val s1_update_taken = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
val s1_update_old_ctr = Wire(Vec(tageNTables, Vec(bankWidth, UInt(3.W))))
val s1_update_alloc = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
val s1_update_u = Wire(Vec(tageNTables, Vec(bankWidth, UInt(2.W))))
s1_update_taken := DontCare
s1_update_old_ctr := DontCare
s1_update_alloc := DontCare
s1_update_u := DontCare
for (w <- 0 until bankWidth) {
var altpred = io.resp_in(0).f3(w).taken
val final_altpred = WireInit(io.resp_in(0).f3(w).taken)
var provided = false.B
var provider = 0.U
io.resp.f3(w).taken := io.resp_in(0).f3(w).taken
for (i <- 0 until tageNTables) {
val hit = f3_resps(i)(w).valid
val ctr = f3_resps(i)(w).bits.ctr
when (hit) {
io.resp.f3(w).taken := Mux(ctr === 3.U || ctr === 4.U, altpred, ctr(2))
final_altpred := altpred
}
provided = provided || hit
provider = Mux(hit, i.U, provider)
altpred = Mux(hit, f3_resps(i)(w).bits.ctr(2), altpred)
}
f3_meta.provider(w).valid := provided
f3_meta.provider(w).bits := provider
f3_meta.alt_differs(w) := final_altpred =/= io.resp.f3(w).taken
f3_meta.provider_u(w) := f3_resps(provider)(w).bits.u
f3_meta.provider_ctr(w) := f3_resps(provider)(w).bits.ctr
// Create a mask of tables which did not hit our query, and also contain useless entries
// and also uses a longer history than the provider
val allocatable_slots = (
VecInit(f3_resps.map(r => !r(w).valid && r(w).bits.u === 0.U)).asUInt &
~(MaskLower(UIntToOH(provider)) & Fill(tageNTables, provided))
)
val alloc_lfsr = random.LFSR(tageNTables max 2)
val first_entry = PriorityEncoder(allocatable_slots)
val masked_entry = PriorityEncoder(allocatable_slots & alloc_lfsr)
val alloc_entry = Mux(allocatable_slots(masked_entry),
masked_entry,
first_entry)
f3_meta.allocate(w).valid := allocatable_slots =/= 0.U
f3_meta.allocate(w).bits := alloc_entry
val update_was_taken = (s1_update.bits.cfi_idx.valid &&
(s1_update.bits.cfi_idx.bits === w.U) &&
s1_update.bits.cfi_taken)
when (s1_update.bits.br_mask(w) && s1_update.valid && s1_update.bits.is_commit_update) {
when (s1_update_meta.provider(w).valid) {
val provider = s1_update_meta.provider(w).bits
s1_update_mask(provider)(w) := true.B
s1_update_u_mask(provider)(w) := true.B
val new_u = inc_u(s1_update_meta.provider_u(w),
s1_update_meta.alt_differs(w),
s1_update_mispredict_mask(w))
s1_update_u (provider)(w) := new_u
s1_update_taken (provider)(w) := update_was_taken
s1_update_old_ctr(provider)(w) := s1_update_meta.provider_ctr(w)
s1_update_alloc (provider)(w) := false.B
}
}
}
when (s1_update.valid && s1_update.bits.is_commit_update && s1_update.bits.cfi_mispredicted && s1_update.bits.cfi_idx.valid) {
val idx = s1_update.bits.cfi_idx.bits
val allocate = s1_update_meta.allocate(idx)
when (allocate.valid) {
s1_update_mask (allocate.bits)(idx) := true.B
s1_update_taken(allocate.bits)(idx) := s1_update.bits.cfi_taken
s1_update_alloc(allocate.bits)(idx) := true.B
s1_update_u_mask(allocate.bits)(idx) := true.B
s1_update_u (allocate.bits)(idx) := 0.U
} .otherwise {
val provider = s1_update_meta.provider(idx)
val decr_mask = Mux(provider.valid, ~MaskLower(UIntToOH(provider.bits)), 0.U)
for (i <- 0 until tageNTables) {
when (decr_mask(i)) {
s1_update_u_mask(i)(idx) := true.B
s1_update_u (i)(idx) := 0.U
}
}
}
}
for (i <- 0 until tageNTables) {
for (w <- 0 until bankWidth) {
tables(i).io.update_mask(w) := RegNext(s1_update_mask(i)(w))
tables(i).io.update_taken(w) := RegNext(s1_update_taken(i)(w))
tables(i).io.update_alloc(w) := RegNext(s1_update_alloc(i)(w))
tables(i).io.update_old_ctr(w) := RegNext(s1_update_old_ctr(i)(w))
tables(i).io.update_u_mask(w) := RegNext(s1_update_u_mask(i)(w))
tables(i).io.update_u(w) := RegNext(s1_update_u(i)(w))
}
tables(i).io.update_pc := RegNext(s1_update.bits.pc)
tables(i).io.update_hist := RegNext(s1_update.bits.ghist)
}
//io.f3_meta := Cat(f3_meta.asUInt, micro.io.f3_meta(micro.metaSz-1,0), base.io.f3_meta(base.metaSz-1, 0))
io.f3_meta := f3_meta.asUInt
} | module hi_us_1(
input [7:0] R0_addr,
input R0_en,
input R0_clk,
output [3:0] R0_data,
input [7:0] W0_addr,
input W0_clk,
input [3:0] W0_data,
input [3:0] W0_mask
);
hi_us_0_ext hi_us_0_ext (
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (1'h1),
.W0_clk (W0_clk),
.W0_data (W0_data),
.W0_mask (W0_mask)
);
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.{Cat, log2Ceil}
import org.chipsalliance.cde.config._
import freechips.rocketchip.tile._
import freechips.rocketchip.util._
class PMPConfig extends Bundle {
val l = Bool()
val res = UInt(2.W)
val a = UInt(2.W)
val x = Bool()
val w = Bool()
val r = Bool()
}
object PMP {
def lgAlign = 2
def apply(reg: PMPReg): PMP = {
val pmp = Wire(new PMP()(reg.p))
pmp.cfg := reg.cfg
pmp.addr := reg.addr
pmp.mask := pmp.computeMask
pmp
}
}
class PMPReg(implicit p: Parameters) extends CoreBundle()(p) {
val cfg = new PMPConfig
val addr = UInt((paddrBits - PMP.lgAlign).W)
def reset(): Unit = {
cfg.a := 0.U
cfg.l := 0.U
}
def readAddr = if (pmpGranularity.log2 == PMP.lgAlign) addr else {
val mask = ((BigInt(1) << (pmpGranularity.log2 - PMP.lgAlign)) - 1).U
Mux(napot, addr | (mask >> 1), ~(~addr | mask))
}
def napot = cfg.a(1)
def torNotNAPOT = cfg.a(0)
def tor = !napot && torNotNAPOT
def cfgLocked = cfg.l
def addrLocked(next: PMPReg) = cfgLocked || next.cfgLocked && next.tor
}
class PMP(implicit p: Parameters) extends PMPReg {
val mask = UInt(paddrBits.W)
import PMP._
def computeMask = {
val base = Cat(addr, cfg.a(0)) | ((pmpGranularity - 1).U >> lgAlign)
Cat(base & ~(base + 1.U), ((1 << lgAlign) - 1).U)
}
private def comparand = ~(~(addr << lgAlign) | (pmpGranularity - 1).U)
private def pow2Match(x: UInt, lgSize: UInt, lgMaxSize: Int) = {
def eval(a: UInt, b: UInt, m: UInt) = ((a ^ b) & ~m) === 0.U
if (lgMaxSize <= pmpGranularity.log2) {
eval(x, comparand, mask)
} else {
// break up the circuit; the MSB part will be CSE'd
val lsbMask = mask | UIntToOH1(lgSize, lgMaxSize)
val msbMatch = eval(x >> lgMaxSize, comparand >> lgMaxSize, mask >> lgMaxSize)
val lsbMatch = eval(x(lgMaxSize-1, 0), comparand(lgMaxSize-1, 0), lsbMask(lgMaxSize-1, 0))
msbMatch && lsbMatch
}
}
private def boundMatch(x: UInt, lsbMask: UInt, lgMaxSize: Int) = {
if (lgMaxSize <= pmpGranularity.log2) {
x < comparand
} else {
// break up the circuit; the MSB part will be CSE'd
val msbsLess = (x >> lgMaxSize) < (comparand >> lgMaxSize)
val msbsEqual = ((x >> lgMaxSize) ^ (comparand >> lgMaxSize)) === 0.U
val lsbsLess = (x(lgMaxSize-1, 0) | lsbMask) < comparand(lgMaxSize-1, 0)
msbsLess || (msbsEqual && lsbsLess)
}
}
private def lowerBoundMatch(x: UInt, lgSize: UInt, lgMaxSize: Int) =
!boundMatch(x, UIntToOH1(lgSize, lgMaxSize), lgMaxSize)
private def upperBoundMatch(x: UInt, lgMaxSize: Int) =
boundMatch(x, 0.U, lgMaxSize)
private def rangeMatch(x: UInt, lgSize: UInt, lgMaxSize: Int, prev: PMP) =
prev.lowerBoundMatch(x, lgSize, lgMaxSize) && upperBoundMatch(x, lgMaxSize)
private def pow2Homogeneous(x: UInt, pgLevel: UInt) = {
val maskHomogeneous = pgLevelMap { idxBits => if (idxBits > paddrBits) false.B else mask(idxBits - 1) } (pgLevel)
maskHomogeneous || (pgLevelMap { idxBits => ((x ^ comparand) >> idxBits) =/= 0.U } (pgLevel))
}
private def pgLevelMap[T](f: Int => T) = (0 until pgLevels).map { i =>
f(pgIdxBits + (pgLevels - 1 - i) * pgLevelBits)
}
private def rangeHomogeneous(x: UInt, pgLevel: UInt, prev: PMP) = {
val beginsAfterLower = !(x < prev.comparand)
val beginsAfterUpper = !(x < comparand)
val pgMask = pgLevelMap { idxBits => (((BigInt(1) << paddrBits) - (BigInt(1) << idxBits)) max 0).U } (pgLevel)
val endsBeforeLower = (x & pgMask) < (prev.comparand & pgMask)
val endsBeforeUpper = (x & pgMask) < (comparand & pgMask)
endsBeforeLower || beginsAfterUpper || (beginsAfterLower && endsBeforeUpper)
}
// returns whether this PMP completely contains, or contains none of, a page
def homogeneous(x: UInt, pgLevel: UInt, prev: PMP): Bool =
Mux(napot, pow2Homogeneous(x, pgLevel), !torNotNAPOT || rangeHomogeneous(x, pgLevel, prev))
// returns whether this matching PMP fully contains the access
def aligned(x: UInt, lgSize: UInt, lgMaxSize: Int, prev: PMP): Bool = if (lgMaxSize <= pmpGranularity.log2) true.B else {
val lsbMask = UIntToOH1(lgSize, lgMaxSize)
val straddlesLowerBound = ((x >> lgMaxSize) ^ (prev.comparand >> lgMaxSize)) === 0.U && (prev.comparand(lgMaxSize-1, 0) & ~x(lgMaxSize-1, 0)) =/= 0.U
val straddlesUpperBound = ((x >> lgMaxSize) ^ (comparand >> lgMaxSize)) === 0.U && (comparand(lgMaxSize-1, 0) & (x(lgMaxSize-1, 0) | lsbMask)) =/= 0.U
val rangeAligned = !(straddlesLowerBound || straddlesUpperBound)
val pow2Aligned = (lsbMask & ~mask(lgMaxSize-1, 0)) === 0.U
Mux(napot, pow2Aligned, rangeAligned)
}
// returns whether this PMP matches at least one byte of the access
def hit(x: UInt, lgSize: UInt, lgMaxSize: Int, prev: PMP): Bool =
Mux(napot, pow2Match(x, lgSize, lgMaxSize), torNotNAPOT && rangeMatch(x, lgSize, lgMaxSize, prev))
}
class PMPHomogeneityChecker(pmps: Seq[PMP])(implicit p: Parameters) {
def apply(addr: UInt, pgLevel: UInt): Bool = {
pmps.foldLeft((true.B, 0.U.asTypeOf(new PMP))) { case ((h, prev), pmp) =>
(h && pmp.homogeneous(addr, pgLevel, prev), pmp)
}._1
}
}
class PMPChecker(lgMaxSize: Int)(implicit val p: Parameters) extends Module
with HasCoreParameters {
override def desiredName = s"PMPChecker_s${lgMaxSize}"
val io = IO(new Bundle {
val prv = Input(UInt(PRV.SZ.W))
val pmp = Input(Vec(nPMPs, new PMP))
val addr = Input(UInt(paddrBits.W))
val size = Input(UInt(log2Ceil(lgMaxSize + 1).W))
val r = Output(Bool())
val w = Output(Bool())
val x = Output(Bool())
})
val default = if (io.pmp.isEmpty) true.B else io.prv > PRV.S.U
val pmp0 = WireInit(0.U.asTypeOf(new PMP))
pmp0.cfg.r := default
pmp0.cfg.w := default
pmp0.cfg.x := default
val res = (io.pmp zip (pmp0 +: io.pmp)).reverse.foldLeft(pmp0) { case (prev, (pmp, prevPMP)) =>
val hit = pmp.hit(io.addr, io.size, lgMaxSize, prevPMP)
val ignore = default && !pmp.cfg.l
val aligned = pmp.aligned(io.addr, io.size, lgMaxSize, prevPMP)
for ((name, idx) <- Seq("no", "TOR", if (pmpGranularity <= 4) "NA4" else "", "NAPOT").zipWithIndex; if name.nonEmpty)
property.cover(pmp.cfg.a === idx.U, s"The cfg access is set to ${name} access ", "Cover PMP access mode setting")
property.cover(pmp.cfg.l === 0x1.U, s"The cfg lock is set to high ", "Cover PMP lock mode setting")
// Not including Write and no Read permission as the combination is reserved
for ((name, idx) <- Seq("no", "RO", "", "RW", "X", "RX", "", "RWX").zipWithIndex; if name.nonEmpty)
property.cover((Cat(pmp.cfg.x, pmp.cfg.w, pmp.cfg.r) === idx.U), s"The permission is set to ${name} access ", "Cover PMP access permission setting")
for ((name, idx) <- Seq("", "TOR", if (pmpGranularity <= 4) "NA4" else "", "NAPOT").zipWithIndex; if name.nonEmpty) {
property.cover(!ignore && hit && aligned && pmp.cfg.a === idx.U, s"The access matches ${name} mode ", "Cover PMP access")
property.cover(pmp.cfg.l && hit && aligned && pmp.cfg.a === idx.U, s"The access matches ${name} mode with lock bit high", "Cover PMP access with lock bit")
}
val cur = WireInit(pmp)
cur.cfg.r := aligned && (pmp.cfg.r || ignore)
cur.cfg.w := aligned && (pmp.cfg.w || ignore)
cur.cfg.x := aligned && (pmp.cfg.x || ignore)
Mux(hit, cur, prev)
}
io.r := res.cfg.r
io.w := res.cfg.w
io.x := res.cfg.x
} | module PMPChecker_s2(
input [1:0] io_prv,
input io_pmp_0_cfg_l,
input [1:0] io_pmp_0_cfg_a,
input io_pmp_0_cfg_x,
input io_pmp_0_cfg_w,
input io_pmp_0_cfg_r,
input [29:0] io_pmp_0_addr,
input [31:0] io_pmp_0_mask,
input io_pmp_1_cfg_l,
input [1:0] io_pmp_1_cfg_a,
input io_pmp_1_cfg_x,
input io_pmp_1_cfg_w,
input io_pmp_1_cfg_r,
input [29:0] io_pmp_1_addr,
input [31:0] io_pmp_1_mask,
input io_pmp_2_cfg_l,
input [1:0] io_pmp_2_cfg_a,
input io_pmp_2_cfg_x,
input io_pmp_2_cfg_w,
input io_pmp_2_cfg_r,
input [29:0] io_pmp_2_addr,
input [31:0] io_pmp_2_mask,
input io_pmp_3_cfg_l,
input [1:0] io_pmp_3_cfg_a,
input io_pmp_3_cfg_x,
input io_pmp_3_cfg_w,
input io_pmp_3_cfg_r,
input [29:0] io_pmp_3_addr,
input [31:0] io_pmp_3_mask,
input io_pmp_4_cfg_l,
input [1:0] io_pmp_4_cfg_a,
input io_pmp_4_cfg_x,
input io_pmp_4_cfg_w,
input io_pmp_4_cfg_r,
input [29:0] io_pmp_4_addr,
input [31:0] io_pmp_4_mask,
input io_pmp_5_cfg_l,
input [1:0] io_pmp_5_cfg_a,
input io_pmp_5_cfg_x,
input io_pmp_5_cfg_w,
input io_pmp_5_cfg_r,
input [29:0] io_pmp_5_addr,
input [31:0] io_pmp_5_mask,
input io_pmp_6_cfg_l,
input [1:0] io_pmp_6_cfg_a,
input io_pmp_6_cfg_x,
input io_pmp_6_cfg_w,
input io_pmp_6_cfg_r,
input [29:0] io_pmp_6_addr,
input [31:0] io_pmp_6_mask,
input io_pmp_7_cfg_l,
input [1:0] io_pmp_7_cfg_a,
input io_pmp_7_cfg_x,
input io_pmp_7_cfg_w,
input io_pmp_7_cfg_r,
input [29:0] io_pmp_7_addr,
input [31:0] io_pmp_7_mask,
input [31:0] io_addr,
output io_r,
output io_w,
output io_x
);
wire res_hit = io_pmp_7_cfg_a[1] ? ((io_addr ^ {io_pmp_7_addr, 2'h0}) & ~io_pmp_7_mask) == 32'h0 : io_pmp_7_cfg_a[0] & io_addr >= {io_pmp_6_addr, 2'h0} & io_addr < {io_pmp_7_addr, 2'h0};
wire res_ignore = io_prv[1] & ~io_pmp_7_cfg_l;
wire res_hit_1 = io_pmp_6_cfg_a[1] ? ((io_addr ^ {io_pmp_6_addr, 2'h0}) & ~io_pmp_6_mask) == 32'h0 : io_pmp_6_cfg_a[0] & io_addr >= {io_pmp_5_addr, 2'h0} & io_addr < {io_pmp_6_addr, 2'h0};
wire res_ignore_1 = io_prv[1] & ~io_pmp_6_cfg_l;
wire res_hit_2 = io_pmp_5_cfg_a[1] ? ((io_addr ^ {io_pmp_5_addr, 2'h0}) & ~io_pmp_5_mask) == 32'h0 : io_pmp_5_cfg_a[0] & io_addr >= {io_pmp_4_addr, 2'h0} & io_addr < {io_pmp_5_addr, 2'h0};
wire res_ignore_2 = io_prv[1] & ~io_pmp_5_cfg_l;
wire res_hit_3 = io_pmp_4_cfg_a[1] ? ((io_addr ^ {io_pmp_4_addr, 2'h0}) & ~io_pmp_4_mask) == 32'h0 : io_pmp_4_cfg_a[0] & io_addr >= {io_pmp_3_addr, 2'h0} & io_addr < {io_pmp_4_addr, 2'h0};
wire res_ignore_3 = io_prv[1] & ~io_pmp_4_cfg_l;
wire res_hit_4 = io_pmp_3_cfg_a[1] ? ((io_addr ^ {io_pmp_3_addr, 2'h0}) & ~io_pmp_3_mask) == 32'h0 : io_pmp_3_cfg_a[0] & io_addr >= {io_pmp_2_addr, 2'h0} & io_addr < {io_pmp_3_addr, 2'h0};
wire res_ignore_4 = io_prv[1] & ~io_pmp_3_cfg_l;
wire res_hit_5 = io_pmp_2_cfg_a[1] ? ((io_addr ^ {io_pmp_2_addr, 2'h0}) & ~io_pmp_2_mask) == 32'h0 : io_pmp_2_cfg_a[0] & io_addr >= {io_pmp_1_addr, 2'h0} & io_addr < {io_pmp_2_addr, 2'h0};
wire res_ignore_5 = io_prv[1] & ~io_pmp_2_cfg_l;
wire res_hit_6 = io_pmp_1_cfg_a[1] ? ((io_addr ^ {io_pmp_1_addr, 2'h0}) & ~io_pmp_1_mask) == 32'h0 : io_pmp_1_cfg_a[0] & io_addr >= {io_pmp_0_addr, 2'h0} & io_addr < {io_pmp_1_addr, 2'h0};
wire res_ignore_6 = io_prv[1] & ~io_pmp_1_cfg_l;
wire res_hit_7 = io_pmp_0_cfg_a[1] ? ((io_addr ^ {io_pmp_0_addr, 2'h0}) & ~io_pmp_0_mask) == 32'h0 : io_pmp_0_cfg_a[0] & io_addr < {io_pmp_0_addr, 2'h0};
wire res_ignore_7 = io_prv[1] & ~io_pmp_0_cfg_l;
assign io_r = res_hit_7 ? io_pmp_0_cfg_r | res_ignore_7 : res_hit_6 ? io_pmp_1_cfg_r | res_ignore_6 : res_hit_5 ? io_pmp_2_cfg_r | res_ignore_5 : res_hit_4 ? io_pmp_3_cfg_r | res_ignore_4 : res_hit_3 ? io_pmp_4_cfg_r | res_ignore_3 : res_hit_2 ? io_pmp_5_cfg_r | res_ignore_2 : res_hit_1 ? io_pmp_6_cfg_r | res_ignore_1 : res_hit ? io_pmp_7_cfg_r | res_ignore : io_prv[1];
assign io_w = res_hit_7 ? io_pmp_0_cfg_w | res_ignore_7 : res_hit_6 ? io_pmp_1_cfg_w | res_ignore_6 : res_hit_5 ? io_pmp_2_cfg_w | res_ignore_5 : res_hit_4 ? io_pmp_3_cfg_w | res_ignore_4 : res_hit_3 ? io_pmp_4_cfg_w | res_ignore_3 : res_hit_2 ? io_pmp_5_cfg_w | res_ignore_2 : res_hit_1 ? io_pmp_6_cfg_w | res_ignore_1 : res_hit ? io_pmp_7_cfg_w | res_ignore : io_prv[1];
assign io_x = res_hit_7 ? io_pmp_0_cfg_x | res_ignore_7 : res_hit_6 ? io_pmp_1_cfg_x | res_ignore_6 : res_hit_5 ? io_pmp_2_cfg_x | res_ignore_5 : res_hit_4 ? io_pmp_3_cfg_x | res_ignore_4 : res_hit_3 ? io_pmp_4_cfg_x | res_ignore_3 : res_hit_2 ? io_pmp_5_cfg_x | res_ignore_2 : res_hit_1 ? io_pmp_6_cfg_x | res_ignore_1 : res_hit ? io_pmp_7_cfg_x | res_ignore : io_prv[1];
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tile
import chisel3._
import chisel3.util._
import chisel3.{DontCare, WireInit, withClock, withReset}
import chisel3.experimental.SourceInfo
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.rocket._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property
/** Elaboration-time configuration of the FPU.
  *
  * @param minFLen     narrowest supported FP width in bits (16 enables half precision)
  * @param fLen        widest supported FP width in bits (32 = F only, 64 = F + D)
  * @param divSqrt     instantiate the divide/square-root unit
  * @param sfmaLatency latency in cycles of the single-precision (and half) FMA pipe
  * @param dfmaLatency latency in cycles of the double-precision FMA pipe
  * @param fpmuLatency latency of the FP-to-FP pipe (sign-inject/min/max/convert)
  * @param ifpuLatency latency of the integer-to-FP pipe
  */
case class FPUParams(
  minFLen: Int = 32,
  fLen: Int = 64,
  divSqrt: Boolean = true,
  sfmaLatency: Int = 3,
  dfmaLatency: Int = 4,
  fpmuLatency: Int = 2,
  ifpuLatency: Int = 2
)
/** Field widths shared across the FPU interfaces. */
object FPConstants
{
  val RM_SZ = 3     // rounding-mode field width, in bits
  val FLAGS_SZ = 5  // exception-flag (fflags) field width, in bits
}
/** Per-instruction FPU control signals; values come from the decode
  * tables in FPUDecoder (one column per field, in declaration order).
  */
trait HasFPUCtrlSigs {
  val ldst = Bool()           // FP load/store
  val wen = Bool()            // writes the FP register file
  val ren1 = Bool()           // reads FP operand rs1
  val ren2 = Bool()           // reads FP operand rs2
  val ren3 = Bool()           // reads FP operand rs3 (true FMA)
  val swap12 = Bool()         // swap operands 1 and 2
  val swap23 = Bool()         // swap operands 2 and 3 (used for add/sub)
  val typeTagIn = UInt(2.W)   // type tag of the input operands (H/S/D or I)
  val typeTagOut = UInt(2.W)  // type tag of the result
  val fromint = Bool()        // integer-to-FP move/convert
  val toint = Bool()          // FP-to-integer move/convert/compare
  val fastpipe = Bool()       // uses the FP-to-FP (FPToFP) pipe
  val fma = Bool()            // uses an FMA pipe
  val div = Bool()            // FP divide
  val sqrt = Bool()           // FP square root
  val wflags = Bool()         // instruction may set exception flags
  val vec = Bool()            // vector instruction (e.g. VFMV_F_S)
}
/** Concrete Bundle carrying the decoded FPU control signals. */
class FPUCtrlSigs extends Bundle with HasFPUCtrlSigs
/** Decodes a 32-bit instruction into FPU control signals.
  *
  * Each table row lists, in order: ldst, wen, ren1, ren2, ren3, swap12,
  * swap23, typeTagIn, typeTagOut, fromint, toint, fastpipe, fma, div,
  * sqrt, wflags, vec -- matching the `sigs` Seq at the bottom.
  * Which tables are included depends on (minFLen, fLen).
  */
class FPUDecoder(implicit p: Parameters) extends FPUModule()(p) {
  val io = IO(new Bundle {
    val inst = Input(Bits(32.W))
    val sigs = Output(new FPUCtrlSigs())
  })

  // 2-bit don't-care for the type-tag columns of instructions whose
  // type tag is irrelevant (e.g. loads)
  private val X2 = BitPat.dontCare(2)

  val default = List(X,X,X,X,X,X,X,X2,X2,X,X,X,X,X,X,X,N)

  // half-precision (Zfh) decode table
  val h: Array[(BitPat, List[BitPat])] =
    Array(FLH -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
      FSH -> List(Y,N,N,Y,N,Y,X, I, H,N,Y,N,N,N,N,N,N),
      FMV_H_X -> List(N,Y,N,N,N,X,X, H, I,Y,N,N,N,N,N,N,N),
      FCVT_H_W -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
      FCVT_H_WU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
      FCVT_H_L -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
      FCVT_H_LU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
      FMV_X_H -> List(N,N,Y,N,N,N,X, I, H,N,Y,N,N,N,N,N,N),
      FCLASS_H -> List(N,N,Y,N,N,N,X, H, H,N,Y,N,N,N,N,N,N),
      FCVT_W_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
      FCVT_WU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
      FCVT_L_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
      FCVT_LU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
      FCVT_S_H -> List(N,Y,Y,N,N,N,X, H, S,N,N,Y,N,N,N,Y,N),
      FCVT_H_S -> List(N,Y,Y,N,N,N,X, S, H,N,N,Y,N,N,N,Y,N),
      FEQ_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
      FLT_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
      FLE_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
      FSGNJ_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
      FSGNJN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
      FSGNJX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
      FMIN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
      FMAX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
      FADD_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
      FSUB_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
      FMUL_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,Y,N,N,Y,N),
      FMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
      FMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
      FNMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
      FNMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
      FDIV_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,N,Y,N,Y,N),
      FSQRT_H -> List(N,Y,Y,N,N,N,X, H, H,N,N,N,N,N,Y,Y,N))

  // single-precision (F) decode table
  val f: Array[(BitPat, List[BitPat])] =
    Array(FLW -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
      FSW -> List(Y,N,N,Y,N,Y,X, I, S,N,Y,N,N,N,N,N,N),
      FMV_W_X -> List(N,Y,N,N,N,X,X, S, I,Y,N,N,N,N,N,N,N),
      FCVT_S_W -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
      FCVT_S_WU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
      FCVT_S_L -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
      FCVT_S_LU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
      FMV_X_W -> List(N,N,Y,N,N,N,X, I, S,N,Y,N,N,N,N,N,N),
      FCLASS_S -> List(N,N,Y,N,N,N,X, S, S,N,Y,N,N,N,N,N,N),
      FCVT_W_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
      FCVT_WU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
      FCVT_L_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
      FCVT_LU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
      FEQ_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
      FLT_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
      FLE_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
      FSGNJ_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
      FSGNJN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
      FSGNJX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
      FMIN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
      FMAX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
      FADD_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
      FSUB_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
      FMUL_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,Y,N,N,Y,N),
      FMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
      FMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
      FNMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
      FNMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
      FDIV_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,N,Y,N,Y,N),
      FSQRT_S -> List(N,Y,Y,N,N,N,X, S, S,N,N,N,N,N,Y,Y,N))

  // double-precision (D) decode table
  val d: Array[(BitPat, List[BitPat])] =
    Array(FLD -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
      FSD -> List(Y,N,N,Y,N,Y,X, I, D,N,Y,N,N,N,N,N,N),
      FMV_D_X -> List(N,Y,N,N,N,X,X, D, I,Y,N,N,N,N,N,N,N),
      FCVT_D_W -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
      FCVT_D_WU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
      FCVT_D_L -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
      FCVT_D_LU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
      FMV_X_D -> List(N,N,Y,N,N,N,X, I, D,N,Y,N,N,N,N,N,N),
      FCLASS_D -> List(N,N,Y,N,N,N,X, D, D,N,Y,N,N,N,N,N,N),
      FCVT_W_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
      FCVT_WU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
      FCVT_L_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
      FCVT_LU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
      FCVT_S_D -> List(N,Y,Y,N,N,N,X, D, S,N,N,Y,N,N,N,Y,N),
      FCVT_D_S -> List(N,Y,Y,N,N,N,X, S, D,N,N,Y,N,N,N,Y,N),
      FEQ_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
      FLT_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
      FLE_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
      FSGNJ_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
      FSGNJN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
      FSGNJX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
      FMIN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
      FMAX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
      FADD_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
      FSUB_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
      FMUL_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,Y,N,N,Y,N),
      FMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
      FMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
      FNMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
      FNMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
      FDIV_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,N,Y,N,Y,N),
      FSQRT_D -> List(N,Y,Y,N,N,N,X, D, D,N,N,N,N,N,Y,Y,N))

  // half <-> double conversions (only when both H and D are enabled)
  val fcvt_hd: Array[(BitPat, List[BitPat])] =
    Array(FCVT_H_D -> List(N,Y,Y,N,N,N,X, D, H,N,N,Y,N,N,N,Y,N),
      FCVT_D_H -> List(N,Y,Y,N,N,N,X, H, D,N,N,Y,N,N,N,Y,N))

  // vector move to FP register (only when the vector unit is present)
  val vfmv_f_s: Array[(BitPat, List[BitPat])] =
    Array(VFMV_F_S -> List(N,Y,N,N,N,N,X,X2,X2,N,N,N,N,N,N,N,Y))

  // select the tables matching this configuration's FP widths
  val insns = ((minFLen, fLen) match {
    case (32, 32) => f
    case (16, 32) => h ++ f
    case (32, 64) => f ++ d
    case (16, 64) => h ++ f ++ d ++ fcvt_hd
    case other => throw new Exception(s"minFLen = ${minFLen} & fLen = ${fLen} is an unsupported configuration")
  }) ++ (if (usingVector) vfmv_f_s else Array[(BitPat, List[BitPat])]())

  val decoder = DecodeLogic(io.inst, default, insns)
  val s = io.sigs
  // must match the column order of the tables above
  val sigs = Seq(s.ldst, s.wen, s.ren1, s.ren2, s.ren3, s.swap12,
    s.swap23, s.typeTagIn, s.typeTagOut, s.fromint, s.toint,
    s.fastpipe, s.fma, s.div, s.sqrt, s.wflags, s.vec)
  sigs zip decoder map {case(s,d) => s := d}
}
/** Interface between the FPU and the core pipeline. */
class FPUCoreIO(implicit p: Parameters) extends CoreBundle()(p) {
  val hartid = Input(UInt(hartIdLen.W))     // hart ID (used for trace/monitoring)
  val time = Input(UInt(xLen.W))            // cycle timer (used for trace/monitoring)

  val inst = Input(Bits(32.W))              // instruction in decode
  val fromint_data = Input(Bits(xLen.W))    // integer operand for fmv/fcvt-from-int

  val fcsr_rm = Input(Bits(FPConstants.RM_SZ.W))        // dynamic rounding mode from fcsr
  val fcsr_flags = Valid(Bits(FPConstants.FLAGS_SZ.W))  // accrued exception flags to fcsr

  val v_sew = Input(UInt(3.W))              // vector SEW; selects S vs D for vector FP reads

  val store_data = Output(Bits(fLen.W))     // FP store data, IEEE format
  val toint_data = Output(Bits(xLen.W))     // FP-to-integer result

  // long-latency (load/vector) writeback port into the FP register file
  val ll_resp_val = Input(Bool())
  val ll_resp_type = Input(Bits(3.W))       // response size/type
  val ll_resp_tag = Input(UInt(5.W))        // destination FP register
  val ll_resp_data = Input(Bits(fLen.W))

  val valid = Input(Bool())                 // FP instruction valid in decode
  val fcsr_rdy = Output(Bool())             // NOTE(review): driven outside this excerpt; presumably no pending fflags writes
  val nack_mem = Output(Bool())             // NOTE(review): driven outside this excerpt; presumably FPU busy, core must replay
  val illegal_rm = Output(Bool())           // rounding-mode field is invalid
  val killx = Input(Bool())                 // kill instruction in X (execute) stage
  val killm = Input(Bool())                 // kill instruction in M (memory) stage

  val dec = Output(new FPUCtrlSigs())       // decoded control signals for the core
  // scoreboard management for long-latency FP writebacks
  val sboard_set = Output(Bool())
  val sboard_clr = Output(Bool())
  val sboard_clra = Output(UInt(5.W))

  val keep_clock_enabled = Input(Bool())    // inhibit FPU clock gating
}
/** Full FPU interface: core interface plus the coprocessor (RoCC) port. */
class FPUIO(implicit p: Parameters) extends FPUCoreIO ()(p) {
  val cp_req = Flipped(Decoupled(new FPInput())) //cp doesn't pay attn to kill sigs
  val cp_resp = Decoupled(new FPResult())
}
/** Result of an FP functional unit: recoded data plus exception flags. */
class FPResult(implicit p: Parameters) extends CoreBundle()(p) {
  val data = Bits((fLen+1).W)             // hardfloat recoded value (one bit wider than IEEE)
  val exc = Bits(FPConstants.FLAGS_SZ.W)  // IEEE exception flags
}
/** Request to the integer-to-FP unit: control signals plus the integer operand. */
class IntToFPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
  val rm = Bits(FPConstants.RM_SZ.W)  // rounding mode
  val typ = Bits(2.W)                 // integer type: bit 0 = unsigned, upper bit(s) = width
  val in1 = Bits(xLen.W)              // integer operand
}
/** Request to the FP functional units: control signals plus up to three recoded operands. */
class FPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
  val rm = Bits(FPConstants.RM_SZ.W)  // rounding mode
  val fmaCmd = Bits(2.W)              // FMA operation select (hardfloat `op` encoding)
  val typ = Bits(2.W)                 // integer type field, for conversions
  val fmt = Bits(2.W)                 // FP format field of the instruction
  val in1 = Bits((fLen+1).W)          // recoded operand 1
  val in2 = Bits((fLen+1).W)          // recoded operand 2
  val in3 = Bits((fLen+1).W)          // recoded operand 3 (FMA addend)
}
/** A hardfloat floating-point format.
  *
  * @param exp exponent field width of the IEEE format
  * @param sig significand width, including the implicit bit
  */
case class FType(exp: Int, sig: Int) {
  def ieeeWidth = exp + sig
  // hardfloat's recoded format carries one extra bit
  def recodedWidth = ieeeWidth + 1

  // canonical quiet NaN, in IEEE and in recoded form respectively
  def ieeeQNaN = ((BigInt(1) << (ieeeWidth - 1)) - (BigInt(1) << (sig - 2))).U(ieeeWidth.W)
  def qNaN = ((BigInt(7) << (exp + sig - 3)) + (BigInt(1) << (sig - 2))).U(recodedWidth.W)

  // recoded NaN: top three exponent bits all set
  def isNaN(x: UInt) = x(sig + exp - 1, sig + exp - 3).andR
  // signaling NaN: a NaN whose fraction MSB is clear
  def isSNaN(x: UInt) = isNaN(x) && !x(sig - 2)

  /** 10-bit classification mask of a recoded value; bit order (MSB first):
    * qNaN, sNaN, +inf, +normal, +subnormal, +0, -0, -subnormal, -normal, -inf.
    */
  def classify(x: UInt) = {
    val sign = x(sig + exp)
    val code = x(exp + sig - 1, exp + sig - 3)   // top 3 recoded-exponent bits
    val codeHi = code(2, 1)
    val isSpecial = codeHi === 3.U               // inf or NaN
    val isHighSubnormalIn = x(exp + sig - 3, sig - 1) < 2.U
    val isSubnormal = code === 1.U || codeHi === 1.U && isHighSubnormalIn
    val isNormal = codeHi === 1.U && !isHighSubnormalIn || codeHi === 2.U
    val isZero = code === 0.U
    val isInf = isSpecial && !code(0)
    val isNaN = code.andR
    val isSNaN = isNaN && !x(sig-2)
    val isQNaN = isNaN && x(sig-2)

    Cat(isQNaN, isSNaN, isInf && !sign, isNormal && !sign,
        isSubnormal && !sign, isZero && !sign, isZero && sign,
        isSubnormal && sign, isNormal && sign, isInf && sign)
  }

  // convert between formats, ignoring rounding, range, NaN
  def unsafeConvert(x: UInt, to: FType) = if (this == to) x else {
    val sign = x(sig + exp)
    val fractIn = x(sig - 2, 0)
    val expIn = x(sig + exp - 1, sig - 1)
    val fractOut = fractIn << to.sig >> sig
    val expOut = {
      val expCode = expIn(exp, exp - 2)
      val commonCase = (expIn + (1 << to.exp).U) - (1 << exp).U
      // special exponent codes (zero/subnormal, inf/NaN) are preserved verbatim
      Mux(expCode === 0.U || expCode >= 6.U, Cat(expCode, commonCase(to.exp - 3, 0)), commonCase(to.exp, 0))
    }
    Cat(sign, expOut, fractOut)
  }

  // view an IEEE value as sign/exponent/significand fields
  private def ieeeBundle = {
    val expWidth = exp
    class IEEEBundle extends Bundle {
      val sign = Bool()
      val exp = UInt(expWidth.W)
      val sig = UInt((ieeeWidth-expWidth-1).W)
    }
    new IEEEBundle
  }

  def unpackIEEE(x: UInt) = x.asTypeOf(ieeeBundle)

  // conversions between IEEE and hardfloat's recoded representation
  def recode(x: UInt) = hardfloat.recFNFromFN(exp, sig, x)
  def ieee(x: UInt) = hardfloat.fNFromRecFN(exp, sig, x)
}
/** The three supported floating-point formats. */
object FType {
  val H = new FType(5, 11)   // half precision
  val S = new FType(8, 24)   // single precision
  val D = new FType(11, 53)  // double precision

  val all = List(H, S, D)    // narrowest first; ordering is relied upon by typeTag
}
/** FP format parameters plus NaN-boxing and recoding helpers.
  *
  * Narrower values live in wider registers "NaN-boxed": the bits above the
  * narrow value hold a NaN pattern, so reading a narrow value at a wider
  * width yields a NaN. The boxing here additionally swizzles the narrow
  * value into the NaN payload so the box is invertible (see `box`/`unbox`).
  */
trait HasFPUParameters {
  require(fLen == 0 || FType.all.exists(_.ieeeWidth == fLen))
  val minFLen: Int            // narrowest supported FP width, in bits
  val fLen: Int               // widest supported FP width, in bits
  def xLen: Int
  val minXLen = 32
  val nIntTypes = log2Ceil(xLen/minXLen) + 1   // number of integer widths (32, 64, ...)
  // supported FP types in this configuration, narrowest first
  def floatTypes = FType.all.filter(t => minFLen <= t.ieeeWidth && t.ieeeWidth <= fLen)
  def minType = floatTypes.head
  def maxType = floatTypes.last
  def prevType(t: FType) = floatTypes(typeTag(t) - 1)   // next-narrower supported type
  def maxExpWidth = maxType.exp
  def maxSigWidth = maxType.sig
  def typeTag(t: FType) = floatTypes.indexOf(t)
  def typeTagWbOffset = (FType.all.indexOf(minType) + 1).U
  // tag of t, or of the widest type when t is not supported
  def typeTagGroup(t: FType) = (if (floatTypes.contains(t)) typeTag(t) else typeTag(maxType)).U
  // typeTag values for the H/S/D formats and for integer (I) operands
  def H = typeTagGroup(FType.H)
  def S = typeTagGroup(FType.S)
  def D = typeTagGroup(FType.D)
  def I = typeTag(maxType).U

  // a width-t value is properly boxed iff the five bits just above it are all ones
  private def isBox(x: UInt, t: FType): Bool = x(t.sig + t.exp, t.sig + t.exp - 4).andR

  // box narrow recoded value y (type yt) into wide NaN x (type xt),
  // swizzling y's bits into the NaN payload so unboxing can recover them
  private def box(x: UInt, xt: FType, y: UInt, yt: FType): UInt = {
    require(xt.ieeeWidth == 2 * yt.ieeeWidth)
    val swizzledNaN = Cat(
      x(xt.sig + xt.exp, xt.sig + xt.exp - 3),
      x(xt.sig - 2, yt.recodedWidth - 1).andR,
      x(xt.sig + xt.exp - 5, xt.sig),
      y(yt.recodedWidth - 2),
      x(xt.sig - 2, yt.recodedWidth - 1),
      y(yt.recodedWidth - 1),
      y(yt.recodedWidth - 3, 0))
    Mux(xt.isNaN(x), swizzledNaN, x)
  }

  // implement NaN unboxing for FU inputs: extract the value tagged `tag`
  // (or exactly `exactType`); an improperly boxed value yields qNaN
  def unbox(x: UInt, tag: UInt, exactType: Option[FType]): UInt = {
    val outType = exactType.getOrElse(maxType)
    // for each type from narrowest box outward: (box-valid, converted value)
    def helper(x: UInt, t: FType): Seq[(Bool, UInt)] = {
      val prev =
        if (t == minType) {
          Seq()
        } else {
          val prevT = prevType(t)
          // undo the swizzle applied by box()
          val unswizzled = Cat(
            x(prevT.sig + prevT.exp - 1),
            x(t.sig - 1),
            x(prevT.sig + prevT.exp - 2, 0))
          val prev = helper(unswizzled, prevT)
          val isbox = isBox(x, t)
          prev.map(p => (isbox && p._1, p._2))
        }
      prev :+ (true.B, t.unsafeConvert(x, outType))
    }
    val (oks, floats) = helper(x, maxType).unzip
    if (exactType.isEmpty || floatTypes.size == 1) {
      Mux(oks(tag), floats(tag), maxType.qNaN)
    } else {
      val t = exactType.get
      floats(typeTag(t)) | Mux(oks(typeTag(t)), 0.U, t.qNaN)
    }
  }

  // make sure that the redundant bits in the NaN-boxed encoding are consistent
  def consistent(x: UInt): Bool = {
    def helper(x: UInt, t: FType): Bool = if (typeTag(t) == 0) true.B else {
      val prevT = prevType(t)
      val unswizzled = Cat(
        x(prevT.sig + prevT.exp - 1),
        x(t.sig - 1),
        x(prevT.sig + prevT.exp - 2, 0))
      val prevOK = !isBox(x, t) || helper(unswizzled, prevT)
      val curOK = !t.isNaN(x) || x(t.sig + t.exp - 4) === x(t.sig - 2, prevT.recodedWidth - 1).andR
      prevOK && curOK
    }
    helper(x, maxType)
  }

  // generate a NaN box from an FU result of exactly type t
  def box(x: UInt, t: FType): UInt = {
    if (t == maxType) {
      x
    } else {
      val nt = floatTypes(typeTag(t) + 1)
      val bigger = box(((BigInt(1) << nt.recodedWidth)-1).U, nt, x, t)
      bigger | ((BigInt(1) << maxType.recodedWidth) - (BigInt(1) << nt.recodedWidth)).U
    }
  }

  // generate a NaN box from an FU result whose type is selected by `tag`
  def box(x: UInt, tag: UInt): UInt = {
    val opts = floatTypes.map(t => box(x, t))
    opts(tag)
  }

  // zap bits that hardfloat thinks are don't-cares, but we do care about
  def sanitizeNaN(x: UInt, t: FType): UInt = {
    if (typeTag(t) == 0) {
      x
    } else {
      val maskedNaN = x & ~((BigInt(1) << (t.sig-1)) | (BigInt(1) << (t.sig+t.exp-4))).U(t.recodedWidth.W)
      Mux(t.isNaN(x), maskedNaN, x)
    }
  }

  // implement NaN boxing and recoding for FL*/fmv.*.x
  def recode(x: UInt, tag: UInt): UInt = {
    def helper(x: UInt, t: FType): UInt = {
      if (typeTag(t) == 0) {
        t.recode(x)
      } else {
        val prevT = prevType(t)
        box(t.recode(x), t, helper(x, prevT), prevT)
      }
    }

    // fill MSBs of subword loads to emulate a wider load of a NaN-boxed value
    val boxes = floatTypes.map(t => ((BigInt(1) << maxType.ieeeWidth) - (BigInt(1) << t.ieeeWidth)).U)
    helper(boxes(tag) | x, maxType)
  }

  // implement NaN unboxing and un-recoding for FS*/fmv.x.*
  def ieee(x: UInt, t: FType = maxType): UInt = {
    if (typeTag(t) == 0) {
      t.ieee(x)
    } else {
      val unrecoded = t.ieee(x)
      val prevT = prevType(t)
      // recover the boxed narrower value and recurse; if x is NaN at width t,
      // the narrower (boxed) value is the one that must be emitted
      val prevRecoded = Cat(
        x(prevT.recodedWidth-2),
        x(t.sig-1),
        x(prevT.recodedWidth-3, 0))
      val prevUnrecoded = ieee(prevRecoded, prevT)
      Cat(unrecoded >> prevT.ieeeWidth, Mux(t.isNaN(x), prevUnrecoded, unrecoded(prevT.ieeeWidth-1, 0)))
    }
  }
}
/** Base class for FPU submodules: a Module with core and FPU parameters in scope. */
abstract class FPUModule(implicit val p: Parameters) extends Module with HasCoreParameters with HasFPUParameters
/** FP-to-integer unit: stores, fmv.x.*, fclass, feq/flt/fle, and
  * FP-to-integer conversions. The input is registered; the result is
  * produced combinationally the following cycle.
  */
class FPToInt(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
  class Output extends Bundle {
    val in = new FPInput                  // the registered request, echoed back
    val lt = Bool()                       // in1 < in2 (consumed by FPToFP for fmin/fmax)
    val store = Bits(fLen.W)              // IEEE-formatted store data
    val toint = Bits(xLen.W)              // integer result
    val exc = Bits(FPConstants.FLAGS_SZ.W)
  }
  val io = IO(new Bundle {
    val in = Flipped(Valid(new FPInput))
    val out = Valid(new Output)
  })

  val in = RegEnable(io.in.bits, io.in.valid)
  val valid = RegNext(io.in.valid)

  // recoded comparison of in1/in2; signaling unless rm(1) is set
  val dcmp = Module(new hardfloat.CompareRecFN(maxExpWidth, maxSigWidth))
  dcmp.io.a := in.in1
  dcmp.io.b := in.in2
  dcmp.io.signaling := !in.rm(1)

  val tag = in.typeTagOut
  // IEEE image of in1 replicated across the width; half-precision results
  // are first sign-extended to 32 bits
  val toint_ieee = (floatTypes.map(t => if (t == FType.H) Fill(maxType.ieeeWidth / minXLen, ieee(in.in1)(15, 0).sextTo(minXLen))
                                        else Fill(maxType.ieeeWidth / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
  val toint = WireDefault(toint_ieee)
  val intType = WireDefault(in.fmt(0))
  io.out.bits.store := (floatTypes.map(t => Fill(fLen / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
  // sign-extend the selected integer width up to xLen
  io.out.bits.toint := ((0 until nIntTypes).map(i => toint((minXLen << i) - 1, 0).sextTo(xLen)): Seq[UInt])(intType)
  io.out.bits.exc := 0.U

  when (in.rm(0)) { // classify
    val classify_out = (floatTypes.map(t => t.classify(maxType.unsafeConvert(in.in1, t))): Seq[UInt])(tag)
    toint := classify_out | (toint_ieee >> minXLen << minXLen)
    intType := false.B
  }

  when (in.wflags) { // feq/flt/fle, fcvt
    toint := (~in.rm & Cat(dcmp.io.lt, dcmp.io.eq)).orR | (toint_ieee >> minXLen << minXLen)
    io.out.bits.exc := dcmp.io.exceptionFlags
    intType := false.B

    when (!in.ren2) { // fcvt
      val cvtType = in.typ.extract(log2Ceil(nIntTypes), 1)   // destination integer width select
      intType := cvtType
      val conv = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, xLen))
      conv.io.in := in.in1
      conv.io.roundingMode := in.rm
      conv.io.signedOut := ~in.typ(0)
      toint := conv.io.out
      // flags: NV from overflow/invalid, NX from inexact
      io.out.bits.exc := Cat(conv.io.intExceptionFlags(2, 1).orR, 0.U(3.W), conv.io.intExceptionFlags(0))

      // for narrower destinations, re-check overflow at width w and
      // substitute the properly-signed extreme value on invalid/overflow
      for (i <- 0 until nIntTypes-1) {
        val w = minXLen << i
        when (cvtType === i.U) {
          val narrow = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, w))
          narrow.io.in := in.in1
          narrow.io.roundingMode := in.rm
          narrow.io.signedOut := ~in.typ(0)

          val excSign = in.in1(maxExpWidth + maxSigWidth) && !maxType.isNaN(in.in1)
          val excOut = Cat(conv.io.signedOut === excSign, Fill(w-1, !excSign))
          val invalid = conv.io.intExceptionFlags(2) || narrow.io.intExceptionFlags(1)
          when (invalid) { toint := Cat(conv.io.out >> w, excOut) }
          io.out.bits.exc := Cat(invalid, 0.U(3.W), !invalid && conv.io.intExceptionFlags(0))
        }
      }
    }
  }

  io.out.valid := valid
  // strengthen the compare for fmin/fmax: any negative-signed value is
  // "less than" any positive-signed one (covers -0.0 vs +0.0)
  io.out.bits.lt := dcmp.io.lt || (dcmp.io.a.asSInt < 0.S && dcmp.io.b.asSInt >= 0.S)
  io.out.bits.in := in
}
/** Integer-to-FP unit: fmv.*.x (recode only) and integer-to-FP conversions.
  * The result is valid `latency` cycles after the input.
  */
class IntToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
  val io = IO(new Bundle {
    val in = Flipped(Valid(new IntToFPInput))
    val out = Valid(new FPResult)
  })

  val in = Pipe(io.in)
  val tag = in.bits.typeTagIn

  val mux = Wire(new FPResult)
  mux.exc := 0.U
  // default (fmv) path: reinterpret the integer bits as FP and recode/box
  mux.data := recode(in.bits.in1, tag)

  // sign- or zero-extend sub-xLen integer operands; typ(0) set means unsigned
  val intValue = {
    val res = WireDefault(in.bits.in1.asSInt)
    for (i <- 0 until nIntTypes-1) {
      val smallInt = in.bits.in1((minXLen << i) - 1, 0)
      when (in.bits.typ.extract(log2Ceil(nIntTypes), 1) === i.U) {
        res := Mux(in.bits.typ(0), smallInt.zext, smallInt.asSInt)
      }
    }
    res.asUInt
  }

  when (in.bits.wflags) { // fcvt
    // could be improved for RVD/RVQ with a single variable-position rounding
    // unit, rather than N fixed-position ones
    val i2fResults = for (t <- floatTypes) yield {
      val i2f = Module(new hardfloat.INToRecFN(xLen, t.exp, t.sig))
      i2f.io.signedIn := ~in.bits.typ(0)
      i2f.io.in := intValue
      i2f.io.roundingMode := in.bits.rm
      i2f.io.detectTininess := hardfloat.consts.tininess_afterRounding
      (sanitizeNaN(i2f.io.out, t), i2f.io.exceptionFlags)
    }

    val (data, exc) = i2fResults.unzip
    // pad each narrower result with the widest conversion's upper bits
    val dataPadded = data.init.map(d => Cat(data.last >> d.getWidth, d)) :+ data.last
    mux.data := dataPadded(tag)
    mux.exc := exc(tag)
  }

  io.out <> Pipe(in.valid, mux, latency-1)
}
/** FP-to-FP pipe: sign injection (fsgnj*), fmin/fmax, and FP-to-FP
  * conversions. The result is valid `latency` cycles after the input.
  */
class FPToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
  val io = IO(new Bundle {
    val in = Flipped(Valid(new FPInput))
    val out = Valid(new FPResult)
    val lt = Input(Bool()) // from FPToInt
  })

  val in = Pipe(io.in)

  // fsgnj/fsgnjn/fsgnjx: rm bits select the sign source (in1^in2, ~in2, or in2)
  val signNum = Mux(in.bits.rm(1), in.bits.in1 ^ in.bits.in2, Mux(in.bits.rm(0), ~in.bits.in2, in.bits.in2))
  val fsgnj = Cat(signNum(fLen), in.bits.in1(fLen-1, 0))

  val fsgnjMux = Wire(new FPResult)
  fsgnjMux.exc := 0.U
  fsgnjMux.data := fsgnj

  when (in.bits.wflags) { // fmin/fmax
    val isnan1 = maxType.isNaN(in.bits.in1)
    val isnan2 = maxType.isNaN(in.bits.in2)
    // a signaling NaN operand raises invalid (fflags bit 4)
    val isInvalid = maxType.isSNaN(in.bits.in1) || maxType.isSNaN(in.bits.in2)
    val isNaNOut = isnan1 && isnan2          // both NaN -> canonical qNaN
    // pick in1 when in2 is NaN, or when the min/max select (rm(0)) disagrees
    // with the compare result; NaN operands always lose
    val isLHS = isnan2 || in.bits.rm(0) =/= io.lt && !isnan1
    fsgnjMux.exc := isInvalid << 4
    fsgnjMux.data := Mux(isNaNOut, maxType.qNaN, Mux(isLHS, in.bits.in1, in.bits.in2))
  }

  val inTag = in.bits.typeTagIn
  val outTag = in.bits.typeTagOut
  val mux = WireDefault(fsgnjMux)
  // NaN-box results narrower than the widest format
  for (t <- floatTypes.init) {
    when (outTag === typeTag(t).U) {
      mux.data := Cat(fsgnjMux.data >> t.recodedWidth, maxType.unsafeConvert(fsgnjMux.data, t))
    }
  }

  when (in.bits.wflags && !in.bits.ren2) { // fcvt
    if (floatTypes.size > 1) {
      // widening conversions simply canonicalize NaN operands
      val widened = Mux(maxType.isNaN(in.bits.in1), maxType.qNaN, in.bits.in1)
      fsgnjMux.data := widened
      fsgnjMux.exc := maxType.isSNaN(in.bits.in1) << 4

      // narrowing conversions require rounding (for RVQ, this could be
      // optimized to use a single variable-position rounding unit, rather
      // than two fixed-position ones)
      for (outType <- floatTypes.init) when (outTag === typeTag(outType).U && ((typeTag(outType) == 0).B || outTag < inTag)) {
        val narrower = Module(new hardfloat.RecFNToRecFN(maxType.exp, maxType.sig, outType.exp, outType.sig))
        narrower.io.in := in.bits.in1
        narrower.io.roundingMode := in.bits.rm
        narrower.io.detectTininess := hardfloat.consts.tininess_afterRounding
        val narrowed = sanitizeNaN(narrower.io.out, outType)
        mux.data := Cat(fsgnjMux.data >> narrowed.getWidth, narrowed)
        mux.exc := narrower.io.exceptionFlags
      }
    }
  }

  io.out <> Pipe(in.valid, mux, latency-1)
}
/** Pipelined wrapper around hardfloat's fused multiply-add datapath.
  *
  * The datapath is split into preMul -> multiply-add -> postMul -> round,
  * with an optional register after the multiply (latency > 0) and an
  * optional register before rounding (latency == 2).
  *
  * @param latency  number of internal pipeline registers (at most 2)
  * @param expWidth exponent width of the recoded format
  * @param sigWidth significand width of the recoded format
  */
class MulAddRecFNPipe(latency: Int, expWidth: Int, sigWidth: Int) extends Module
{
  override def desiredName = s"MulAddRecFNPipe_l${latency}_e${expWidth}_s${sigWidth}"
  require(latency<=2)

  val io = IO(new Bundle {
    val validin = Input(Bool())
    val op = Input(Bits(2.W))                          // hardfloat mul-add op select
    val a = Input(Bits((expWidth + sigWidth + 1).W))
    val b = Input(Bits((expWidth + sigWidth + 1).W))
    val c = Input(Bits((expWidth + sigWidth + 1).W))   // addend
    val roundingMode = Input(UInt(3.W))
    val detectTininess = Input(UInt(1.W))
    val out = Output(Bits((expWidth + sigWidth + 1).W))
    val exceptionFlags = Output(Bits(5.W))
    val validout = Output(Bool())
  })

  //------------------------------------------------------------------------
  //------------------------------------------------------------------------
  val mulAddRecFNToRaw_preMul = Module(new hardfloat.MulAddRecFNToRaw_preMul(expWidth, sigWidth))
  val mulAddRecFNToRaw_postMul = Module(new hardfloat.MulAddRecFNToRaw_postMul(expWidth, sigWidth))

  mulAddRecFNToRaw_preMul.io.op := io.op
  mulAddRecFNToRaw_preMul.io.a := io.a
  mulAddRecFNToRaw_preMul.io.b := io.b
  mulAddRecFNToRaw_preMul.io.c := io.c

  // the integer multiply-add at the heart of the FMA
  val mulAddResult =
    (mulAddRecFNToRaw_preMul.io.mulAddA *
       mulAddRecFNToRaw_preMul.io.mulAddB) +&
      mulAddRecFNToRaw_preMul.io.mulAddC

  val valid_stage0 = Wire(Bool())
  val roundingMode_stage0 = Wire(UInt(3.W))
  val detectTininess_stage0 = Wire(UInt(1.W))

  // optional register between multiply and postMul (Pipe with 0 or 1 stages)
  val postmul_regs = if(latency>0) 1 else 0
  mulAddRecFNToRaw_postMul.io.fromPreMul := Pipe(io.validin, mulAddRecFNToRaw_preMul.io.toPostMul, postmul_regs).bits
  mulAddRecFNToRaw_postMul.io.mulAddResult := Pipe(io.validin, mulAddResult, postmul_regs).bits
  mulAddRecFNToRaw_postMul.io.roundingMode := Pipe(io.validin, io.roundingMode, postmul_regs).bits
  roundingMode_stage0 := Pipe(io.validin, io.roundingMode, postmul_regs).bits
  detectTininess_stage0 := Pipe(io.validin, io.detectTininess, postmul_regs).bits
  valid_stage0 := Pipe(io.validin, false.B, postmul_regs).valid

  //------------------------------------------------------------------------
  //------------------------------------------------------------------------
  val roundRawFNToRecFN = Module(new hardfloat.RoundRawFNToRecFN(expWidth, sigWidth, 0))

  // optional register before final rounding
  val round_regs = if(latency==2) 1 else 0
  roundRawFNToRecFN.io.invalidExc := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.invalidExc, round_regs).bits
  roundRawFNToRecFN.io.in := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.rawOut, round_regs).bits
  roundRawFNToRecFN.io.roundingMode := Pipe(valid_stage0, roundingMode_stage0, round_regs).bits
  roundRawFNToRecFN.io.detectTininess := Pipe(valid_stage0, detectTininess_stage0, round_regs).bits
  io.validout := Pipe(valid_stage0, false.B, round_regs).valid

  roundRawFNToRecFN.io.infiniteExc := false.B

  io.out := roundRawFNToRecFN.io.out
  io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
/** FMA pipeline for floating-point format `t`.
  *
  * Add/sub and plain multiply are mapped onto the fused multiply-adder:
  * add/sub substitutes 1.0 for in2 (computing in1*1.0 +/- addend), and
  * plain multiply substitutes a zero carrying the product's sign for in3.
  *
  * @param latency total pipeline latency in cycles (must be positive)
  * @param t       floating-point format of this pipe
  */
class FPUFMAPipe(val latency: Int, val t: FType)
                (implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
  override def desiredName = s"FPUFMAPipe_l${latency}_f${t.ieeeWidth}"
  require(latency>0)

  val io = IO(new Bundle {
    val in = Flipped(Valid(new FPInput))
    val out = Valid(new FPResult)
  })

  // one input register stage with operand fix-ups
  val valid = RegNext(io.in.valid)
  val in = Reg(new FPInput)
  when (io.in.valid) {
    val one = 1.U << (t.sig + t.exp - 1)                                      // recoded 1.0
    val zero = (io.in.bits.in1 ^ io.in.bits.in2) & (1.U << (t.sig + t.exp))   // zero with the product's sign
    val cmd_fma = io.in.bits.ren3      // true FMA (reads rs3)
    val cmd_addsub = io.in.bits.swap23 // add/sub (operand was moved to in3)
    in := io.in.bits
    when (cmd_addsub) { in.in2 := one }
    when (!(cmd_fma || cmd_addsub)) { in.in3 := zero }
  }

  // the hardfloat core absorbs up to two cycles of the requested latency
  val fma = Module(new MulAddRecFNPipe((latency-1) min 2, t.exp, t.sig))
  fma.io.validin := valid
  fma.io.op := in.fmaCmd
  fma.io.roundingMode := in.rm
  fma.io.detectTininess := hardfloat.consts.tininess_afterRounding
  fma.io.a := in.in1
  fma.io.b := in.in2
  fma.io.c := in.in3

  val res = Wire(new FPResult)
  res.data := sanitizeNaN(fma.io.out, t)
  res.exc := fma.io.exceptionFlags

  // remaining latency beyond the input register and the FMA's internal stages
  io.out := Pipe(fma.io.validout, res, (latency-3) max 0)
}
class FPU(cfg: FPUParams)(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new FPUIO)
val (useClockGating, useDebugROB) = coreParams match {
case r: RocketCoreParams =>
val sz = if (r.debugROB.isDefined) r.debugROB.get.size else 1
(r.clockGate, sz < 1)
case _ => (false, false)
}
val clock_en_reg = Reg(Bool())
val clock_en = clock_en_reg || io.cp_req.valid
val gated_clock =
if (!useClockGating) clock
else ClockGate(clock, clock_en, "fpu_clock_gate")
val fp_decoder = Module(new FPUDecoder)
fp_decoder.io.inst := io.inst
val id_ctrl = WireInit(fp_decoder.io.sigs)
coreParams match { case r: RocketCoreParams => r.vector.map(v => {
val v_decode = v.decoder(p) // Only need to get ren1
v_decode.io.inst := io.inst
v_decode.io.vconfig := DontCare // core deals with this
when (v_decode.io.legal && v_decode.io.read_frs1) {
id_ctrl.ren1 := true.B
id_ctrl.swap12 := false.B
id_ctrl.toint := true.B
id_ctrl.typeTagIn := I
id_ctrl.typeTagOut := Mux(io.v_sew === 3.U, D, S)
}
when (v_decode.io.write_frd) { id_ctrl.wen := true.B }
})}
val ex_reg_valid = RegNext(io.valid, false.B)
val ex_reg_inst = RegEnable(io.inst, io.valid)
val ex_reg_ctrl = RegEnable(id_ctrl, io.valid)
val ex_ra = List.fill(3)(Reg(UInt()))
// load/vector response
val load_wb = RegNext(io.ll_resp_val)
val load_wb_typeTag = RegEnable(io.ll_resp_type(1,0) - typeTagWbOffset, io.ll_resp_val)
val load_wb_data = RegEnable(io.ll_resp_data, io.ll_resp_val)
val load_wb_tag = RegEnable(io.ll_resp_tag, io.ll_resp_val)
class FPUImpl { // entering gated-clock domain
val req_valid = ex_reg_valid || io.cp_req.valid
val ex_cp_valid = io.cp_req.fire
val mem_cp_valid = RegNext(ex_cp_valid, false.B)
val wb_cp_valid = RegNext(mem_cp_valid, false.B)
val mem_reg_valid = RegInit(false.B)
val killm = (io.killm || io.nack_mem) && !mem_cp_valid
// Kill X-stage instruction if M-stage is killed. This prevents it from
// speculatively being sent to the div-sqrt unit, which can cause priority
// inversion for two back-to-back divides, the first of which is killed.
val killx = io.killx || mem_reg_valid && killm
mem_reg_valid := ex_reg_valid && !killx || ex_cp_valid
val mem_reg_inst = RegEnable(ex_reg_inst, ex_reg_valid)
val wb_reg_valid = RegNext(mem_reg_valid && (!killm || mem_cp_valid), false.B)
val cp_ctrl = Wire(new FPUCtrlSigs)
cp_ctrl :<>= io.cp_req.bits.viewAsSupertype(new FPUCtrlSigs)
io.cp_resp.valid := false.B
io.cp_resp.bits.data := 0.U
io.cp_resp.bits.exc := DontCare
val ex_ctrl = Mux(ex_cp_valid, cp_ctrl, ex_reg_ctrl)
val mem_ctrl = RegEnable(ex_ctrl, req_valid)
val wb_ctrl = RegEnable(mem_ctrl, mem_reg_valid)
// CoreMonitorBundle to monitor fp register file writes
val frfWriteBundle = Seq.fill(2)(WireInit(new CoreMonitorBundle(xLen, fLen), DontCare))
frfWriteBundle.foreach { i =>
i.clock := clock
i.reset := reset
i.hartid := io.hartid
i.timer := io.time(31,0)
i.valid := false.B
i.wrenx := false.B
i.wrenf := false.B
i.excpt := false.B
}
// regfile
val regfile = Mem(32, Bits((fLen+1).W))
when (load_wb) {
val wdata = recode(load_wb_data, load_wb_typeTag)
regfile(load_wb_tag) := wdata
assert(consistent(wdata))
if (enableCommitLog)
printf("f%d p%d 0x%x\n", load_wb_tag, load_wb_tag + 32.U, ieee(wdata))
if (useDebugROB)
DebugROB.pushWb(clock, reset, io.hartid, load_wb, load_wb_tag + 32.U, ieee(wdata))
frfWriteBundle(0).wrdst := load_wb_tag
frfWriteBundle(0).wrenf := true.B
frfWriteBundle(0).wrdata := ieee(wdata)
}
val ex_rs = ex_ra.map(a => regfile(a))
when (io.valid) {
when (id_ctrl.ren1) {
when (!id_ctrl.swap12) { ex_ra(0) := io.inst(19,15) }
when (id_ctrl.swap12) { ex_ra(1) := io.inst(19,15) }
}
when (id_ctrl.ren2) {
when (id_ctrl.swap12) { ex_ra(0) := io.inst(24,20) }
when (id_ctrl.swap23) { ex_ra(2) := io.inst(24,20) }
when (!id_ctrl.swap12 && !id_ctrl.swap23) { ex_ra(1) := io.inst(24,20) }
}
when (id_ctrl.ren3) { ex_ra(2) := io.inst(31,27) }
}
val ex_rm = Mux(ex_reg_inst(14,12) === 7.U, io.fcsr_rm, ex_reg_inst(14,12))
def fuInput(minT: Option[FType]): FPInput = {
val req = Wire(new FPInput)
val tag = ex_ctrl.typeTagIn
req.viewAsSupertype(new Bundle with HasFPUCtrlSigs) :#= ex_ctrl.viewAsSupertype(new Bundle with HasFPUCtrlSigs)
req.rm := ex_rm
req.in1 := unbox(ex_rs(0), tag, minT)
req.in2 := unbox(ex_rs(1), tag, minT)
req.in3 := unbox(ex_rs(2), tag, minT)
req.typ := ex_reg_inst(21,20)
req.fmt := ex_reg_inst(26,25)
req.fmaCmd := ex_reg_inst(3,2) | (!ex_ctrl.ren3 && ex_reg_inst(27))
when (ex_cp_valid) {
req := io.cp_req.bits
when (io.cp_req.bits.swap12) {
req.in1 := io.cp_req.bits.in2
req.in2 := io.cp_req.bits.in1
}
when (io.cp_req.bits.swap23) {
req.in2 := io.cp_req.bits.in3
req.in3 := io.cp_req.bits.in2
}
}
req
}
val sfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.S))
sfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === S
sfma.io.in.bits := fuInput(Some(sfma.t))
val fpiu = Module(new FPToInt)
fpiu.io.in.valid := req_valid && (ex_ctrl.toint || ex_ctrl.div || ex_ctrl.sqrt || (ex_ctrl.fastpipe && ex_ctrl.wflags))
fpiu.io.in.bits := fuInput(None)
io.store_data := fpiu.io.out.bits.store
io.toint_data := fpiu.io.out.bits.toint
when(fpiu.io.out.valid && mem_cp_valid && mem_ctrl.toint){
io.cp_resp.bits.data := fpiu.io.out.bits.toint
io.cp_resp.valid := true.B
}
val ifpu = Module(new IntToFP(cfg.ifpuLatency))
ifpu.io.in.valid := req_valid && ex_ctrl.fromint
ifpu.io.in.bits := fpiu.io.in.bits
ifpu.io.in.bits.in1 := Mux(ex_cp_valid, io.cp_req.bits.in1, io.fromint_data)
val fpmu = Module(new FPToFP(cfg.fpmuLatency))
fpmu.io.in.valid := req_valid && ex_ctrl.fastpipe
fpmu.io.in.bits := fpiu.io.in.bits
fpmu.io.lt := fpiu.io.out.bits.lt
val divSqrt_wen = WireDefault(false.B)
val divSqrt_inFlight = WireDefault(false.B)
val divSqrt_waddr = Reg(UInt(5.W))
val divSqrt_cp = Reg(Bool())
val divSqrt_typeTag = Wire(UInt(log2Up(floatTypes.size).W))
val divSqrt_wdata = Wire(UInt((fLen+1).W))
val divSqrt_flags = Wire(UInt(FPConstants.FLAGS_SZ.W))
divSqrt_typeTag := DontCare
divSqrt_wdata := DontCare
divSqrt_flags := DontCare
// writeback arbitration
case class Pipe(p: Module, lat: Int, cond: (FPUCtrlSigs) => Bool, res: FPResult)
val pipes = List(
Pipe(fpmu, fpmu.latency, (c: FPUCtrlSigs) => c.fastpipe, fpmu.io.out.bits),
Pipe(ifpu, ifpu.latency, (c: FPUCtrlSigs) => c.fromint, ifpu.io.out.bits),
Pipe(sfma, sfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === S, sfma.io.out.bits)) ++
(fLen > 32).option({
val dfma = Module(new FPUFMAPipe(cfg.dfmaLatency, FType.D))
dfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === D
dfma.io.in.bits := fuInput(Some(dfma.t))
Pipe(dfma, dfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === D, dfma.io.out.bits)
}) ++
(minFLen == 16).option({
val hfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.H))
hfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === H
hfma.io.in.bits := fuInput(Some(hfma.t))
Pipe(hfma, hfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === H, hfma.io.out.bits)
})
def latencyMask(c: FPUCtrlSigs, offset: Int) = {
require(pipes.forall(_.lat >= offset))
pipes.map(p => Mux(p.cond(c), (1 << p.lat-offset).U, 0.U)).reduce(_|_)
}
def pipeid(c: FPUCtrlSigs) = pipes.zipWithIndex.map(p => Mux(p._1.cond(c), p._2.U, 0.U)).reduce(_|_)
val maxLatency = pipes.map(_.lat).max
val memLatencyMask = latencyMask(mem_ctrl, 2)
class WBInfo extends Bundle {
val rd = UInt(5.W)
val typeTag = UInt(log2Up(floatTypes.size).W)
val cp = Bool()
val pipeid = UInt(log2Ceil(pipes.size).W)
}
val wen = RegInit(0.U((maxLatency-1).W))
val wbInfo = Reg(Vec(maxLatency-1, new WBInfo))
val mem_wen = mem_reg_valid && (mem_ctrl.fma || mem_ctrl.fastpipe || mem_ctrl.fromint)
val write_port_busy = RegEnable(mem_wen && (memLatencyMask & latencyMask(ex_ctrl, 1)).orR || (wen & latencyMask(ex_ctrl, 0)).orR, req_valid)
ccover(mem_reg_valid && write_port_busy, "WB_STRUCTURAL", "structural hazard on writeback")
for (i <- 0 until maxLatency-2) {
when (wen(i+1)) { wbInfo(i) := wbInfo(i+1) }
}
wen := wen >> 1
when (mem_wen) {
when (!killm) {
wen := wen >> 1 | memLatencyMask
}
for (i <- 0 until maxLatency-1) {
when (!write_port_busy && memLatencyMask(i)) {
wbInfo(i).cp := mem_cp_valid
wbInfo(i).typeTag := mem_ctrl.typeTagOut
wbInfo(i).pipeid := pipeid(mem_ctrl)
wbInfo(i).rd := mem_reg_inst(11,7)
}
}
}
val waddr = Mux(divSqrt_wen, divSqrt_waddr, wbInfo(0).rd)
val wb_cp = Mux(divSqrt_wen, divSqrt_cp, wbInfo(0).cp)
val wtypeTag = Mux(divSqrt_wen, divSqrt_typeTag, wbInfo(0).typeTag)
val wdata = box(Mux(divSqrt_wen, divSqrt_wdata, (pipes.map(_.res.data): Seq[UInt])(wbInfo(0).pipeid)), wtypeTag)
val wexc = (pipes.map(_.res.exc): Seq[UInt])(wbInfo(0).pipeid)
when ((!wbInfo(0).cp && wen(0)) || divSqrt_wen) {
assert(consistent(wdata))
regfile(waddr) := wdata
if (enableCommitLog) {
printf("f%d p%d 0x%x\n", waddr, waddr + 32.U, ieee(wdata))
}
frfWriteBundle(1).wrdst := waddr
frfWriteBundle(1).wrenf := true.B
frfWriteBundle(1).wrdata := ieee(wdata)
}
if (useDebugROB) {
DebugROB.pushWb(clock, reset, io.hartid, (!wbInfo(0).cp && wen(0)) || divSqrt_wen, waddr + 32.U, ieee(wdata))
}
when (wb_cp && (wen(0) || divSqrt_wen)) {
io.cp_resp.bits.data := wdata
io.cp_resp.valid := true.B
}
assert(!io.cp_req.valid || pipes.forall(_.lat == pipes.head.lat).B,
s"FPU only supports coprocessor if FMA pipes have uniform latency ${pipes.map(_.lat)}")
// Avoid structural hazards and nacking of external requests
// toint responds in the MEM stage, so an incoming toint can induce a structural hazard against inflight FMAs
io.cp_req.ready := !ex_reg_valid && !(cp_ctrl.toint && wen =/= 0.U) && !divSqrt_inFlight
val wb_toint_valid = wb_reg_valid && wb_ctrl.toint
val wb_toint_exc = RegEnable(fpiu.io.out.bits.exc, mem_ctrl.toint)
io.fcsr_flags.valid := wb_toint_valid || divSqrt_wen || wen(0)
io.fcsr_flags.bits :=
Mux(wb_toint_valid, wb_toint_exc, 0.U) |
Mux(divSqrt_wen, divSqrt_flags, 0.U) |
Mux(wen(0), wexc, 0.U)
val divSqrt_write_port_busy = (mem_ctrl.div || mem_ctrl.sqrt) && wen.orR
io.fcsr_rdy := !(ex_reg_valid && ex_ctrl.wflags || mem_reg_valid && mem_ctrl.wflags || wb_reg_valid && wb_ctrl.toint || wen.orR || divSqrt_inFlight)
io.nack_mem := (write_port_busy || divSqrt_write_port_busy || divSqrt_inFlight) && !mem_cp_valid
io.dec <> id_ctrl
def useScoreboard(f: ((Pipe, Int)) => Bool) = pipes.zipWithIndex.filter(_._1.lat > 3).map(x => f(x)).fold(false.B)(_||_)
io.sboard_set := wb_reg_valid && !wb_cp_valid && RegNext(useScoreboard(_._1.cond(mem_ctrl)) || mem_ctrl.div || mem_ctrl.sqrt || mem_ctrl.vec)
io.sboard_clr := !wb_cp_valid && (divSqrt_wen || (wen(0) && useScoreboard(x => wbInfo(0).pipeid === x._2.U)))
io.sboard_clra := waddr
ccover(io.sboard_clr && load_wb, "DUAL_WRITEBACK", "load and FMA writeback on same cycle")
// we don't currently support round-max-magnitude (rm=4)
io.illegal_rm := io.inst(14,12).isOneOf(5.U, 6.U) || io.inst(14,12) === 7.U && io.fcsr_rm >= 5.U
if (cfg.divSqrt) {
val divSqrt_inValid = mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt) && !divSqrt_inFlight
val divSqrt_killed = RegNext(divSqrt_inValid && killm, true.B)
when (divSqrt_inValid) {
divSqrt_waddr := mem_reg_inst(11,7)
divSqrt_cp := mem_cp_valid
}
ccover(divSqrt_inFlight && divSqrt_killed, "DIV_KILLED", "divide killed after issued to divider")
ccover(divSqrt_inFlight && mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt), "DIV_BUSY", "divider structural hazard")
ccover(mem_reg_valid && divSqrt_write_port_busy, "DIV_WB_STRUCTURAL", "structural hazard on division writeback")
for (t <- floatTypes) {
val tag = mem_ctrl.typeTagOut
val divSqrt = withReset(divSqrt_killed) { Module(new hardfloat.DivSqrtRecFN_small(t.exp, t.sig, 0)) }
divSqrt.io.inValid := divSqrt_inValid && tag === typeTag(t).U
divSqrt.io.sqrtOp := mem_ctrl.sqrt
divSqrt.io.a := maxType.unsafeConvert(fpiu.io.out.bits.in.in1, t)
divSqrt.io.b := maxType.unsafeConvert(fpiu.io.out.bits.in.in2, t)
divSqrt.io.roundingMode := fpiu.io.out.bits.in.rm
divSqrt.io.detectTininess := hardfloat.consts.tininess_afterRounding
when (!divSqrt.io.inReady) { divSqrt_inFlight := true.B } // only 1 in flight
when (divSqrt.io.outValid_div || divSqrt.io.outValid_sqrt) {
divSqrt_wen := !divSqrt_killed
divSqrt_wdata := sanitizeNaN(divSqrt.io.out, t)
divSqrt_flags := divSqrt.io.exceptionFlags
divSqrt_typeTag := typeTag(t).U
}
}
when (divSqrt_killed) { divSqrt_inFlight := false.B }
} else {
when (id_ctrl.div || id_ctrl.sqrt) { io.illegal_rm := true.B }
}
// gate the clock
clock_en_reg := !useClockGating.B ||
io.keep_clock_enabled || // chicken bit
io.valid || // ID stage
req_valid || // EX stage
mem_reg_valid || mem_cp_valid || // MEM stage
wb_reg_valid || wb_cp_valid || // WB stage
wen.orR || divSqrt_inFlight || // post-WB stage
io.ll_resp_val // load writeback
} // leaving gated-clock domain
val fpuImpl = withClock (gated_clock) { new FPUImpl }
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"FPU_$label", "Core;;" + desc)
} | module regfile_32x65(
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [64:0] R0_data,
input [4:0] R1_addr,
input R1_en,
input R1_clk,
output [64:0] R1_data,
input [4:0] R2_addr,
input R2_en,
input R2_clk,
output [64:0] R2_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [64:0] W0_data,
input [4:0] W1_addr,
input W1_en,
input W1_clk,
input [64:0] W1_data
);
reg [64:0] Memory[0:31];
always @(posedge W0_clk) begin
if (W0_en & 1'h1)
Memory[W0_addr] <= W0_data;
if (W1_en & 1'h1)
Memory[W1_addr] <= W1_data;
end
assign R0_data = R0_en ? Memory[R0_addr] : 65'bx;
assign R1_data = R1_en ? Memory[R1_addr] : 65'bx;
assign R2_data = R2_en ? Memory[R2_addr] : 65'bx;
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{Decoupled, DecoupledIO}
// A Repeater passes its input to its output, unless repeat is asserted.
// When repeat is asserted, the Repeater copies the input and repeats it next cycle.
class Repeater[T <: Data](gen: T) extends Module
{
override def desiredName = s"Repeater_${gen.typeName}"
val io = IO( new Bundle {
val repeat = Input(Bool())
val full = Output(Bool())
val enq = Flipped(Decoupled(gen.cloneType))
val deq = Decoupled(gen.cloneType)
} )
val full = RegInit(false.B)
val saved = Reg(gen.cloneType)
// When !full, a repeater is pass-through
io.deq.valid := io.enq.valid || full
io.enq.ready := io.deq.ready && !full
io.deq.bits := Mux(full, saved, io.enq.bits)
io.full := full
when (io.enq.fire && io.repeat) { full := true.B; saved := io.enq.bits }
when (io.deq.fire && !io.repeat) { full := false.B }
}
object Repeater
{
def apply[T <: Data](enq: DecoupledIO[T], repeat: Bool): DecoupledIO[T] = {
val repeater = Module(new Repeater(chiselTypeOf(enq.bits)))
repeater.io.repeat := repeat
repeater.io.enq <> enq
repeater.io.deq
}
} | module Repeater_TLBundleA_a29d64s7k1z3u(
input clock,
input reset,
input io_repeat,
output io_full,
output io_enq_ready,
input io_enq_valid,
input [2:0] io_enq_bits_opcode,
input [2:0] io_enq_bits_param,
input [2:0] io_enq_bits_size,
input [6:0] io_enq_bits_source,
input [28:0] io_enq_bits_address,
input [7:0] io_enq_bits_mask,
input io_enq_bits_corrupt,
input io_deq_ready,
output io_deq_valid,
output [2:0] io_deq_bits_opcode,
output [2:0] io_deq_bits_param,
output [2:0] io_deq_bits_size,
output [6:0] io_deq_bits_source,
output [28:0] io_deq_bits_address,
output [7:0] io_deq_bits_mask,
output io_deq_bits_corrupt
);
reg full;
reg [2:0] saved_opcode;
reg [2:0] saved_param;
reg [2:0] saved_size;
reg [6:0] saved_source;
reg [28:0] saved_address;
reg [7:0] saved_mask;
reg saved_corrupt;
wire io_deq_valid_0 = io_enq_valid | full;
wire io_enq_ready_0 = io_deq_ready & ~full;
wire _GEN = io_enq_ready_0 & io_enq_valid & io_repeat;
always @(posedge clock) begin
if (reset)
full <= 1'h0;
else
full <= ~(io_deq_ready & io_deq_valid_0 & ~io_repeat) & (_GEN | full);
if (_GEN) begin
saved_opcode <= io_enq_bits_opcode;
saved_param <= io_enq_bits_param;
saved_size <= io_enq_bits_size;
saved_source <= io_enq_bits_source;
saved_address <= io_enq_bits_address;
saved_mask <= io_enq_bits_mask;
saved_corrupt <= io_enq_bits_corrupt;
end
end
assign io_full = full;
assign io_enq_ready = io_enq_ready_0;
assign io_deq_valid = io_deq_valid_0;
assign io_deq_bits_opcode = full ? saved_opcode : io_enq_bits_opcode;
assign io_deq_bits_param = full ? saved_param : io_enq_bits_param;
assign io_deq_bits_size = full ? saved_size : io_enq_bits_size;
assign io_deq_bits_source = full ? saved_source : io_enq_bits_source;
assign io_deq_bits_address = full ? saved_address : io_enq_bits_address;
assign io_deq_bits_mask = full ? saved_mask : io_enq_bits_mask;
assign io_deq_bits_corrupt = full ? saved_corrupt : io_enq_bits_corrupt;
endmodule |
Generate the Verilog code corresponding to this Chisel code /*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2017 SiFive, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of SiFive nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY SIFIVE AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL SIFIVE OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
/*
s = sigWidth
c_i = newBit
Division:
width of a is (s+2)
Normal
------
(qi + ci * 2^(-i))*b <= a
q0 = 0
r0 = a
q(i+1) = qi + ci*2^(-i)
ri = a - qi*b
r(i+1) = a - q(i+1)*b
= a - qi*b - ci*2^(-i)*b
r(i+1) = ri - ci*2^(-i)*b
ci = ri >= 2^(-i)*b
summary_i = ri != 0
i = 0 to s+1
(s+1)th bit plus summary_(i+1) gives enough information for rounding
If (a < b), then we need to calculate (s+2)th bit and summary_(i+1)
because we need s bits ignoring the leading zero. (This is skipCycle2
part of Hauser's code.)
Hauser
------
sig_i = qi
rem_i = 2^(i-2)*ri
cycle_i = s+3-i
sig_0 = 0
rem_0 = a/4
cycle_0 = s+3
bit_0 = 2^0 (= 2^(s+1), since we represent a, b and q with (s+2) bits)
sig(i+1) = sig(i) + ci*bit_i
rem(i+1) = 2rem_i - ci*b/2
ci = 2rem_i >= b/2
bit_i = 2^-i (=2^(cycle_i-2), since we represent a, b and q with (s+2) bits)
cycle(i+1) = cycle_i-1
summary_1 = a <> b
summary(i+1) = if ci then 2rem_i-b/2 <> 0 else summary_i, i <> 0
Proof:
2^i*r(i+1) = 2^i*ri - ci*b. Qed
ci = 2^i*ri >= b. Qed
summary(i+1) = if ci then rem(i+1) else summary_i, i <> 0
Now, note that all of ck's cannot be 0, since that means
a is 0. So when you traverse through a chain of 0 ck's,
from the end,
eventually, you reach a non-zero cj. That is exactly the
value of ri as the reminder remains the same. When all ck's
are 0 except c0 (which must be 1) then summary_1 is set
correctly according
to r1 = a-b != 0. So summary(i+1) is always set correctly
according to r(i+1)
Square root:
width of a is (s+1)
Normal
------
(xi + ci*2^(-i))^2 <= a
xi^2 + ci*2^(-i)*(2xi+ci*2^(-i)) <= a
x0 = 0
x(i+1) = xi + ci*2^(-i)
ri = a - xi^2
r(i+1) = a - x(i+1)^2
= a - (xi^2 + ci*2^(-i)*(2xi+ci*2^(-i)))
= ri - ci*2^(-i)*(2xi+ci*2^(-i))
= ri - ci*2^(-i)*(2xi+2^(-i)) // ci is always 0 or 1
ci = ri >= 2^(-i)*(2xi + 2^(-i))
summary_i = ri != 0
i = 0 to s+1
For odd expression, do 2 steps initially.
(s+1)th bit plus summary_(i+1) gives enough information for rounding.
Hauser
------
sig_i = xi
rem_i = ri*2^(i-1)
cycle_i = s+2-i
bit_i = 2^(-i) (= 2^(s-i) = 2^(cycle_i-2) in terms of bit representation)
sig_0 = 0
rem_0 = a/2
cycle_0 = s+2
bit_0 = 1 (= 2^s in terms of bit representation)
sig(i+1) = sig_i + ci * bit_i
rem(i+1) = 2rem_i - ci*(2sig_i + bit_i)
ci = 2*sig_i + bit_i <= 2*rem_i
bit_i = 2^(cycle_i-2) (in terms of bit representation)
cycle(i+1) = cycle_i-1
summary_1 = a - (2^s) (in terms of bit representation)
summary(i+1) = if ci then rem(i+1) <> 0 else summary_i, i <> 0
Proof:
ci = 2*sig_i + bit_i <= 2*rem_i
ci = 2xi + 2^(-i) <= ri*2^i. Qed
sig(i+1) = sig_i + ci * bit_i
x(i+1) = xi + ci*2^(-i). Qed
rem(i+1) = 2rem_i - ci*(2sig_i + bit_i)
r(i+1)*2^i = ri*2^i - ci*(2xi + 2^(-i))
r(i+1) = ri - ci*2^(-i)*(2xi + 2^(-i)). Qed
Same argument as before for summary.
------------------------------
Note that all registers are updated normally until cycle == 2.
At cycle == 2, rem is not updated, but all other registers are updated normally.
But, cycle == 1 does not read rem to calculate anything (note that final summary
is calculated using the values at cycle = 2).
*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
/*----------------------------------------------------------------------------
| Computes a division or square root for floating-point in recoded form.
| Multiple clock cycles are needed for each division or square-root operation,
| except possibly in special cases.
*----------------------------------------------------------------------------*/
class
DivSqrtRawFN_small(expWidth: Int, sigWidth: Int, options: Int)
extends Module
{
override def desiredName = s"DivSqrtRawFN_small_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
/*--------------------------------------------------------------------
*--------------------------------------------------------------------*/
val inReady = Output(Bool())
val inValid = Input(Bool())
val sqrtOp = Input(Bool())
val a = Input(new RawFloat(expWidth, sigWidth))
val b = Input(new RawFloat(expWidth, sigWidth))
val roundingMode = Input(UInt(3.W))
/*--------------------------------------------------------------------
*--------------------------------------------------------------------*/
val rawOutValid_div = Output(Bool())
val rawOutValid_sqrt = Output(Bool())
val roundingModeOut = Output(UInt(3.W))
val invalidExc = Output(Bool())
val infiniteExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
val cycleNum = RegInit(0.U(log2Ceil(sigWidth + 3).W))
val inReady = RegInit(true.B) // <-> (cycleNum <= 1)
val rawOutValid = RegInit(false.B) // <-> (cycleNum === 1)
val sqrtOp_Z = Reg(Bool())
val majorExc_Z = Reg(Bool())
//*** REDUCE 3 BITS TO 2-BIT CODE:
val isNaN_Z = Reg(Bool())
val isInf_Z = Reg(Bool())
val isZero_Z = Reg(Bool())
val sign_Z = Reg(Bool())
val sExp_Z = Reg(SInt((expWidth + 2).W))
val fractB_Z = Reg(UInt(sigWidth.W))
val roundingMode_Z = Reg(UInt(3.W))
/*------------------------------------------------------------------------
| (The most-significant and least-significant bits of 'rem_Z' are needed
| only for square roots.)
*------------------------------------------------------------------------*/
val rem_Z = Reg(UInt((sigWidth + 2).W))
val notZeroRem_Z = Reg(Bool())
val sigX_Z = Reg(UInt((sigWidth + 2).W))
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
val rawA_S = io.a
val rawB_S = io.b
//*** IMPROVE THESE:
val notSigNaNIn_invalidExc_S_div =
(rawA_S.isZero && rawB_S.isZero) || (rawA_S.isInf && rawB_S.isInf)
val notSigNaNIn_invalidExc_S_sqrt =
! rawA_S.isNaN && ! rawA_S.isZero && rawA_S.sign
val majorExc_S =
Mux(io.sqrtOp,
isSigNaNRawFloat(rawA_S) || notSigNaNIn_invalidExc_S_sqrt,
isSigNaNRawFloat(rawA_S) || isSigNaNRawFloat(rawB_S) ||
notSigNaNIn_invalidExc_S_div ||
(! rawA_S.isNaN && ! rawA_S.isInf && rawB_S.isZero)
)
val isNaN_S =
Mux(io.sqrtOp,
rawA_S.isNaN || notSigNaNIn_invalidExc_S_sqrt,
rawA_S.isNaN || rawB_S.isNaN || notSigNaNIn_invalidExc_S_div
)
val isInf_S = Mux(io.sqrtOp, rawA_S.isInf, rawA_S.isInf || rawB_S.isZero)
val isZero_S = Mux(io.sqrtOp, rawA_S.isZero, rawA_S.isZero || rawB_S.isInf)
val sign_S = rawA_S.sign ^ (! io.sqrtOp && rawB_S.sign)
val specialCaseA_S = rawA_S.isNaN || rawA_S.isInf || rawA_S.isZero
val specialCaseB_S = rawB_S.isNaN || rawB_S.isInf || rawB_S.isZero
val normalCase_S_div = ! specialCaseA_S && ! specialCaseB_S
val normalCase_S_sqrt = ! specialCaseA_S && ! rawA_S.sign
val normalCase_S = Mux(io.sqrtOp, normalCase_S_sqrt, normalCase_S_div)
val sExpQuot_S_div =
rawA_S.sExp +&
Cat(rawB_S.sExp(expWidth), ~rawB_S.sExp(expWidth - 1, 0)).asSInt
//*** IS THIS OPTIMAL?:
val sSatExpQuot_S_div =
Cat(Mux(((BigInt(7)<<(expWidth - 2)).S <= sExpQuot_S_div),
6.U,
sExpQuot_S_div(expWidth + 1, expWidth - 2)
),
sExpQuot_S_div(expWidth - 3, 0)
).asSInt
val evenSqrt_S = io.sqrtOp && ! rawA_S.sExp(0)
val oddSqrt_S = io.sqrtOp && rawA_S.sExp(0)
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
val idle = cycleNum === 0.U
val entering = inReady && io.inValid
val entering_normalCase = entering && normalCase_S
val processTwoBits = cycleNum >= 3.U && ((options & divSqrtOpt_twoBitsPerCycle) != 0).B
val skipCycle2 = cycleNum === 3.U && sigX_Z(sigWidth + 1) && ((options & divSqrtOpt_twoBitsPerCycle) == 0).B
when (! idle || entering) {
def computeCycleNum(f: UInt => UInt): UInt = {
Mux(entering & ! normalCase_S, f(1.U), 0.U) |
Mux(entering_normalCase,
Mux(io.sqrtOp,
Mux(rawA_S.sExp(0), f(sigWidth.U), f((sigWidth + 1).U)),
f((sigWidth + 2).U)
),
0.U
) |
Mux(! entering && ! skipCycle2, f(cycleNum - Mux(processTwoBits, 2.U, 1.U)), 0.U) |
Mux(skipCycle2, f(1.U), 0.U)
}
inReady := computeCycleNum(_ <= 1.U).asBool
rawOutValid := computeCycleNum(_ === 1.U).asBool
cycleNum := computeCycleNum(x => x)
}
io.inReady := inReady
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
when (entering) {
sqrtOp_Z := io.sqrtOp
majorExc_Z := majorExc_S
isNaN_Z := isNaN_S
isInf_Z := isInf_S
isZero_Z := isZero_S
sign_Z := sign_S
sExp_Z :=
Mux(io.sqrtOp,
(rawA_S.sExp>>1) +& (BigInt(1)<<(expWidth - 1)).S,
sSatExpQuot_S_div
)
roundingMode_Z := io.roundingMode
}
when (entering || ! inReady && sqrtOp_Z) {
fractB_Z :=
Mux(inReady && ! io.sqrtOp, rawB_S.sig(sigWidth - 2, 0)<<1, 0.U) |
Mux(inReady && io.sqrtOp && rawA_S.sExp(0), (BigInt(1)<<(sigWidth - 2)).U, 0.U) |
Mux(inReady && io.sqrtOp && ! rawA_S.sExp(0), (BigInt(1)<<(sigWidth - 1)).U, 0.U) |
Mux(! inReady /* sqrtOp_Z */ && processTwoBits, fractB_Z>>2, 0.U) |
Mux(! inReady /* sqrtOp_Z */ && ! processTwoBits, fractB_Z>>1, 0.U)
}
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
val rem =
Mux(inReady && ! oddSqrt_S, rawA_S.sig<<1, 0.U) |
Mux(inReady && oddSqrt_S,
Cat(rawA_S.sig(sigWidth - 1, sigWidth - 2) - 1.U,
rawA_S.sig(sigWidth - 3, 0)<<3
),
0.U
) |
Mux(! inReady, rem_Z<<1, 0.U)
val bitMask = (1.U<<cycleNum)>>2
val trialTerm =
Mux(inReady && ! io.sqrtOp, rawB_S.sig<<1, 0.U) |
Mux(inReady && evenSqrt_S, (BigInt(1)<<sigWidth).U, 0.U) |
Mux(inReady && oddSqrt_S, (BigInt(5)<<(sigWidth - 1)).U, 0.U) |
Mux(! inReady, fractB_Z, 0.U) |
Mux(! inReady && ! sqrtOp_Z, 1.U << sigWidth, 0.U) |
Mux(! inReady && sqrtOp_Z, sigX_Z<<1, 0.U)
val trialRem = rem.zext -& trialTerm.zext
val newBit = (0.S <= trialRem)
val nextRem_Z = Mux(newBit, trialRem.asUInt, rem)(sigWidth + 1, 0)
val rem2 = nextRem_Z<<1
val trialTerm2_newBit0 = Mux(sqrtOp_Z, fractB_Z>>1 | sigX_Z<<1, fractB_Z | (1.U << sigWidth))
val trialTerm2_newBit1 = trialTerm2_newBit0 | Mux(sqrtOp_Z, fractB_Z<<1, 0.U)
val trialRem2 =
Mux(newBit,
(trialRem<<1) - trialTerm2_newBit1.zext,
(rem_Z<<2)(sigWidth+2, 0).zext - trialTerm2_newBit0.zext)
val newBit2 = (0.S <= trialRem2)
val nextNotZeroRem_Z = Mux(inReady || newBit, trialRem =/= 0.S, notZeroRem_Z)
val nextNotZeroRem_Z_2 = // <-> Mux(newBit2, trialRem2 =/= 0.S, nextNotZeroRem_Z)
processTwoBits && newBit && (0.S < (trialRem<<1) - trialTerm2_newBit1.zext) ||
processTwoBits && !newBit && (0.S < (rem_Z<<2)(sigWidth+2, 0).zext - trialTerm2_newBit0.zext) ||
!(processTwoBits && newBit2) && nextNotZeroRem_Z
val nextRem_Z_2 =
Mux(processTwoBits && newBit2, trialRem2.asUInt(sigWidth + 1, 0), 0.U) |
Mux(processTwoBits && !newBit2, rem2(sigWidth + 1, 0), 0.U) |
Mux(!processTwoBits, nextRem_Z, 0.U)
when (entering || ! inReady) {
notZeroRem_Z := nextNotZeroRem_Z_2
rem_Z := nextRem_Z_2
sigX_Z :=
Mux(inReady && ! io.sqrtOp, newBit<<(sigWidth + 1), 0.U) |
Mux(inReady && io.sqrtOp, (BigInt(1)<<sigWidth).U, 0.U) |
Mux(inReady && oddSqrt_S, newBit<<(sigWidth - 1), 0.U) |
Mux(! inReady, sigX_Z, 0.U) |
Mux(! inReady && newBit, bitMask, 0.U) |
Mux(processTwoBits && newBit2, bitMask>>1, 0.U)
}
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
io.rawOutValid_div := rawOutValid && ! sqrtOp_Z
io.rawOutValid_sqrt := rawOutValid && sqrtOp_Z
io.roundingModeOut := roundingMode_Z
io.invalidExc := majorExc_Z && isNaN_Z
io.infiniteExc := majorExc_Z && ! isNaN_Z
io.rawOut.isNaN := isNaN_Z
io.rawOut.isInf := isInf_Z
io.rawOut.isZero := isZero_Z
io.rawOut.sign := sign_Z
io.rawOut.sExp := sExp_Z
io.rawOut.sig := sigX_Z<<1 | notZeroRem_Z
}
/*----------------------------------------------------------------------------
*----------------------------------------------------------------------------*/
class
DivSqrtRecFNToRaw_small(expWidth: Int, sigWidth: Int, options: Int)
extends Module
{
override def desiredName = s"DivSqrtRecFMToRaw_small_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
/*--------------------------------------------------------------------
*--------------------------------------------------------------------*/
val inReady = Output(Bool())
val inValid = Input(Bool())
val sqrtOp = Input(Bool())
val a = Input(UInt((expWidth + sigWidth + 1).W))
val b = Input(UInt((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
/*--------------------------------------------------------------------
*--------------------------------------------------------------------*/
val rawOutValid_div = Output(Bool())
val rawOutValid_sqrt = Output(Bool())
val roundingModeOut = Output(UInt(3.W))
val invalidExc = Output(Bool())
val infiniteExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
val divSqrtRawFN =
Module(new DivSqrtRawFN_small(expWidth, sigWidth, options))
io.inReady := divSqrtRawFN.io.inReady
divSqrtRawFN.io.inValid := io.inValid
divSqrtRawFN.io.sqrtOp := io.sqrtOp
divSqrtRawFN.io.a := rawFloatFromRecFN(expWidth, sigWidth, io.a)
divSqrtRawFN.io.b := rawFloatFromRecFN(expWidth, sigWidth, io.b)
divSqrtRawFN.io.roundingMode := io.roundingMode
io.rawOutValid_div := divSqrtRawFN.io.rawOutValid_div
io.rawOutValid_sqrt := divSqrtRawFN.io.rawOutValid_sqrt
io.roundingModeOut := divSqrtRawFN.io.roundingModeOut
io.invalidExc := divSqrtRawFN.io.invalidExc
io.infiniteExc := divSqrtRawFN.io.infiniteExc
io.rawOut := divSqrtRawFN.io.rawOut
}
/*----------------------------------------------------------------------------
*----------------------------------------------------------------------------*/
class
DivSqrtRecFN_small(expWidth: Int, sigWidth: Int, options: Int)
extends Module
{
override def desiredName = s"DivSqrtRecFM_small_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
/*--------------------------------------------------------------------
*--------------------------------------------------------------------*/
val inReady = Output(Bool())
val inValid = Input(Bool())
val sqrtOp = Input(Bool())
val a = Input(UInt((expWidth + sigWidth + 1).W))
val b = Input(UInt((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
/*--------------------------------------------------------------------
*--------------------------------------------------------------------*/
val outValid_div = Output(Bool())
val outValid_sqrt = Output(Bool())
val out = Output(UInt((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(UInt(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val divSqrtRecFNToRaw =
Module(new DivSqrtRecFNToRaw_small(expWidth, sigWidth, options))
io.inReady := divSqrtRecFNToRaw.io.inReady
divSqrtRecFNToRaw.io.inValid := io.inValid
divSqrtRecFNToRaw.io.sqrtOp := io.sqrtOp
divSqrtRecFNToRaw.io.a := io.a
divSqrtRecFNToRaw.io.b := io.b
divSqrtRecFNToRaw.io.roundingMode := io.roundingMode
//------------------------------------------------------------------------
//------------------------------------------------------------------------
io.outValid_div := divSqrtRecFNToRaw.io.rawOutValid_div
io.outValid_sqrt := divSqrtRecFNToRaw.io.rawOutValid_sqrt
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := divSqrtRecFNToRaw.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := divSqrtRecFNToRaw.io.infiniteExc
roundRawFNToRecFN.io.in := divSqrtRecFNToRaw.io.rawOut
roundRawFNToRecFN.io.roundingMode := divSqrtRecFNToRaw.io.roundingModeOut
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
} | module DivSqrtRawFN_small_e11_s53(
input clock,
input reset,
output io_inReady,
input io_inValid,
input io_sqrtOp,
input io_a_isNaN,
input io_a_isInf,
input io_a_isZero,
input io_a_sign,
input [12:0] io_a_sExp,
input [53:0] io_a_sig,
input io_b_isNaN,
input io_b_isInf,
input io_b_isZero,
input io_b_sign,
input [12:0] io_b_sExp,
input [53:0] io_b_sig,
input [2:0] io_roundingMode,
output io_rawOutValid_div,
output io_rawOutValid_sqrt,
output [2:0] io_roundingModeOut,
output io_invalidExc,
output io_infiniteExc,
output io_rawOut_isNaN,
output io_rawOut_isInf,
output io_rawOut_isZero,
output io_rawOut_sign,
output [12:0] io_rawOut_sExp,
output [55:0] io_rawOut_sig
);
reg [5:0] cycleNum;
reg inReady;
reg rawOutValid;
reg sqrtOp_Z;
reg majorExc_Z;
reg isNaN_Z;
reg isInf_Z;
reg isZero_Z;
reg sign_Z;
reg [12:0] sExp_Z;
reg [52:0] fractB_Z;
reg [2:0] roundingMode_Z;
reg [54:0] rem_Z;
reg notZeroRem_Z;
reg [54:0] sigX_Z;
wire specialCaseA_S = io_a_isNaN | io_a_isInf | io_a_isZero;
wire normalCase_S = io_sqrtOp ? ~specialCaseA_S & ~io_a_sign : ~specialCaseA_S & ~(io_b_isNaN | io_b_isInf | io_b_isZero);
wire skipCycle2 = cycleNum == 6'h3 & sigX_Z[54];
wire notSigNaNIn_invalidExc_S_div = io_a_isZero & io_b_isZero | io_a_isInf & io_b_isInf;
wire notSigNaNIn_invalidExc_S_sqrt = ~io_a_isNaN & ~io_a_isZero & io_a_sign;
wire [13:0] sExpQuot_S_div = {io_a_sExp[12], io_a_sExp} + {{3{io_b_sExp[11]}}, ~(io_b_sExp[10:0])};
wire [52:0] _fractB_Z_T_4 = inReady & ~io_sqrtOp ? {io_b_sig[51:0], 1'h0} : 53'h0;
wire _fractB_Z_T_10 = inReady & io_sqrtOp;
wire [63:0] _bitMask_T = 64'h1 << cycleNum;
wire oddSqrt_S = io_sqrtOp & io_a_sExp[0];
wire entering = inReady & io_inValid;
wire _sigX_Z_T_7 = inReady & oddSqrt_S;
wire [55:0] rem = {1'h0, inReady & ~oddSqrt_S ? {io_a_sig, 1'h0} : 55'h0} | (_sigX_Z_T_7 ? {io_a_sig[52:51] - 2'h1, io_a_sig[50:0], 3'h0} : 56'h0) | (inReady ? 56'h0 : {rem_Z, 1'h0});
wire [54:0] _trialTerm_T_3 = inReady & ~io_sqrtOp ? {io_b_sig, 1'h0} : 55'h0;
wire [54:0] _trialTerm_T_9 = {_trialTerm_T_3[54], _trialTerm_T_3[53:0] | {inReady & io_sqrtOp & ~(io_a_sExp[0]), 53'h0}} | (_sigX_Z_T_7 ? 55'h50000000000000 : 55'h0);
wire [57:0] trialRem = {2'h0, rem} - {2'h0, {1'h0, _trialTerm_T_9[54], _trialTerm_T_9[53] | ~inReady & ~sqrtOp_Z, _trialTerm_T_9[52:0] | (inReady ? 53'h0 : fractB_Z)} | (~inReady & sqrtOp_Z ? {sigX_Z, 1'h0} : 56'h0)};
wire newBit = $signed(trialRem) > -58'sh1;
wire _GEN = entering | ~inReady;
wire [5:0] _cycleNum_T_15 = {5'h0, entering & ~normalCase_S} | (entering & normalCase_S ? (io_sqrtOp ? (io_a_sExp[0] ? 6'h35 : 6'h36) : 6'h37) : 6'h0) | (entering | skipCycle2 ? 6'h0 : cycleNum - 6'h1);
wire [54:0] _sigX_Z_T_3 = inReady & ~io_sqrtOp ? {newBit, 54'h0} : 55'h0;
wire [53:0] _GEN_0 = _sigX_Z_T_3[53:0] | {inReady & io_sqrtOp, 53'h0};
always @(posedge clock) begin
if (reset) begin
cycleNum <= 6'h0;
inReady <= 1'h1;
rawOutValid <= 1'h0;
end
else if ((|cycleNum) | entering) begin
cycleNum <= {_cycleNum_T_15[5:1], _cycleNum_T_15[0] | skipCycle2};
inReady <= entering & ~normalCase_S | ~entering & ~skipCycle2 & cycleNum - 6'h1 < 6'h2 | skipCycle2;
rawOutValid <= entering & ~normalCase_S | ~entering & ~skipCycle2 & cycleNum - 6'h1 == 6'h1 | skipCycle2;
end
if (entering) begin
sqrtOp_Z <= io_sqrtOp;
majorExc_Z <= io_sqrtOp ? io_a_isNaN & ~(io_a_sig[51]) | notSigNaNIn_invalidExc_S_sqrt : io_a_isNaN & ~(io_a_sig[51]) | io_b_isNaN & ~(io_b_sig[51]) | notSigNaNIn_invalidExc_S_div | ~io_a_isNaN & ~io_a_isInf & io_b_isZero;
isNaN_Z <= io_sqrtOp ? io_a_isNaN | notSigNaNIn_invalidExc_S_sqrt : io_a_isNaN | io_b_isNaN | notSigNaNIn_invalidExc_S_div;
isInf_Z <= ~io_sqrtOp & io_b_isZero | io_a_isInf;
isZero_Z <= ~io_sqrtOp & io_b_isInf | io_a_isZero;
sign_Z <= io_a_sign ^ ~io_sqrtOp & io_b_sign;
sExp_Z <= io_sqrtOp ? {io_a_sExp[12], io_a_sExp[12:1]} + 13'h400 : {$signed(sExpQuot_S_div) > 14'shDFF ? 4'h6 : sExpQuot_S_div[12:9], sExpQuot_S_div[8:0]};
roundingMode_Z <= io_roundingMode;
end
if (entering | ~inReady & sqrtOp_Z)
fractB_Z <= {_fractB_Z_T_4[52] | _fractB_Z_T_10 & ~(io_a_sExp[0]), _fractB_Z_T_4[51:0] | {_fractB_Z_T_10 & io_a_sExp[0], 51'h0} | (inReady ? 52'h0 : fractB_Z[52:1])};
if (_GEN) begin
rem_Z <= newBit ? trialRem[54:0] : rem[54:0];
sigX_Z <= {_sigX_Z_T_3[54], _GEN_0[53], _GEN_0[52:0] | (_sigX_Z_T_7 ? {newBit, 52'h0} : 53'h0)} | (inReady ? 55'h0 : sigX_Z) | (~inReady & newBit ? _bitMask_T[56:2] : 55'h0);
end
if (_GEN & (inReady | newBit))
notZeroRem_Z <= |trialRem;
end
assign io_inReady = inReady;
assign io_rawOutValid_div = rawOutValid & ~sqrtOp_Z;
assign io_rawOutValid_sqrt = rawOutValid & sqrtOp_Z;
assign io_roundingModeOut = roundingMode_Z;
assign io_invalidExc = majorExc_Z & isNaN_Z;
assign io_infiniteExc = majorExc_Z & ~isNaN_Z;
assign io_rawOut_isNaN = isNaN_Z;
assign io_rawOut_isInf = isInf_Z;
assign io_rawOut_isZero = isZero_Z;
assign io_rawOut_sign = sign_Z;
assign io_rawOut_sExp = sExp_Z;
assign io_rawOut_sig = {sigX_Z, notZeroRem_Z};
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
object DescribedSRAM {
/** Builds a named, annotated `SyncReadMem`.
  *
  * The memory is created with `size` entries of type `data`, given the
  * requested instance name, and registered via `Annotated.srams` so that
  * downstream tooling can discover its geometry.
  *
  * @param name instance name suggested for the generated memory
  * @param desc human-readable description recorded in the SRAM annotation
  * @param size number of entries (depth) of the memory
  * @param data prototype of one entry; a `Vec` reports its element width as
  *             the write-mask granularity, otherwise the full entry width
  * @return the constructed `SyncReadMem`
  */
def apply[T <: Data](
  name: String,
  desc: String,
  size: BigInt, // depth
  data: T
): SyncReadMem[T] = {
  val mem = SyncReadMem(size, data)
  mem.suggestName(name)
  // Mask granularity: per-element width for vectors, whole entry otherwise.
  val granWidth = data match {
    case v: Vec[_] => v.head.getWidth
    case d => d.getWidth
  }
  // NOTE(review): removed the unused local `val uid = 0` — it was dead code.
  Annotated.srams(
    component = mem,
    name = name,
    address_width = log2Ceil(size),
    data_width = data.getWidth,
    depth = size,
    description = desc,
    write_mask_granularity = granWidth
  )
  mem
}
} | module array_3_0_0(
// Read port R0: 9-bit address (512 entries), enable, clock, 64-bit data out.
input [8:0] R0_addr,
input R0_en,
input R0_clk,
output [63:0] R0_data,
// Write port W0: same geometry as the read port.
input [8:0] W0_addr,
input W0_en,
input W0_clk,
input [63:0] W0_data
);
// Pure pass-through wrapper: storage lives in `array_0_0_0_ext`, which is
// defined elsewhere (presumably a shared memory macro — confirm against the
// rest of the generated netlist).
array_0_0_0_ext array_0_0_0_ext (
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
);
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
object DescribedSRAM {
/** Builds a named, annotated `SyncReadMem`.
  *
  * The memory is created with `size` entries of type `data`, given the
  * requested instance name, and registered via `Annotated.srams` so that
  * downstream tooling can discover its geometry.
  *
  * @param name instance name suggested for the generated memory
  * @param desc human-readable description recorded in the SRAM annotation
  * @param size number of entries (depth) of the memory
  * @param data prototype of one entry; a `Vec` reports its element width as
  *             the write-mask granularity, otherwise the full entry width
  * @return the constructed `SyncReadMem`
  */
def apply[T <: Data](
  name: String,
  desc: String,
  size: BigInt, // depth
  data: T
): SyncReadMem[T] = {
  val mem = SyncReadMem(size, data)
  mem.suggestName(name)
  // Mask granularity: per-element width for vectors, whole entry otherwise.
  val granWidth = data match {
    case v: Vec[_] => v.head.getWidth
    case d => d.getWidth
  }
  // NOTE(review): removed the unused local `val uid = 0` — it was dead code.
  Annotated.srams(
    component = mem,
    name = name,
    address_width = log2Ceil(size),
    data_width = data.getWidth,
    depth = size,
    description = desc,
    write_mask_granularity = granWidth
  )
  mem
}
} | module cc_banks_6(
// Single combined read/write port: 14-bit address (16384 entries) of 32-bit
// words; RW0_wmode selects write (1) vs read (0).
input [13:0] RW0_addr,
input RW0_en,
input RW0_clk,
input RW0_wmode,
input [31:0] RW0_wdata,
output [31:0] RW0_rdata
);
// Pass-through wrapper: storage lives in `cc_banks_0_ext`, defined elsewhere
// (presumably a shared memory macro — confirm against the generated netlist).
cc_banks_0_ext cc_banks_0_ext (
.RW0_addr (RW0_addr),
.RW0_en (RW0_en),
.RW0_clk (RW0_clk),
.RW0_wmode (RW0_wmode),
.RW0_wdata (RW0_wdata),
.RW0_rdata (RW0_rdata)
);
endmodule |
Generate the Verilog code corresponding to this Chisel code package boom.v3.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import boom.v3.common._
import boom.v3.util.{BoomCoreStringPrefix, MaskLower, WrapInc}
import scala.math.min
// Per-slot response from one TAGE table lookup. Field order defines the bit
// layout of the bundle, so do not reorder.
class TageResp extends Bundle {
val ctr = UInt(3.W) // 3-bit saturating taken/not-taken counter
val u = UInt(2.W) // 2-bit usefulness counter ({hi, lo} bits)
}
// One tagged-geometric (TAGE) component table: nRows sets of bankWidth
// entries, indexed/tagged by PC hashed with a folded global history of
// histLength bits. Usefulness ("u") bits are periodically cleared every
// uBitPeriod cycles (hi and lo halves alternately).
class TageTable(val nRows: Int, val tagSz: Int, val histLength: Int, val uBitPeriod: Int)
(implicit p: Parameters) extends BoomModule()(p)
with HasBoomFrontendParameters
{
require(histLength <= globalHistoryLength)
val nWrBypassEntries = 2
val io = IO( new Bundle {
// f1: lookup request (registered PC/history from the predictor bank).
val f1_req_valid = Input(Bool())
val f1_req_pc = Input(UInt(vaddrBitsExtended.W))
val f1_req_ghist = Input(UInt(globalHistoryLength.W))
// f3: per-slot hit/ctr/u response, two cycles after the request.
val f3_resp = Output(Vec(bankWidth, Valid(new TageResp)))
// Update interface (one cycle, masked per slot).
val update_mask = Input(Vec(bankWidth, Bool()))
val update_taken = Input(Vec(bankWidth, Bool()))
val update_alloc = Input(Vec(bankWidth, Bool()))
val update_old_ctr = Input(Vec(bankWidth, UInt(3.W)))
val update_pc = Input(UInt())
val update_hist = Input(UInt())
val update_u_mask = Input(Vec(bankWidth, Bool()))
val update_u = Input(Vec(bankWidth, UInt(2.W)))
})
// XOR-fold `hist` down to l bits (chunks of l bits XORed together).
def compute_folded_hist(hist: UInt, l: Int) = {
val nChunks = (histLength + l - 1) / l
val hist_chunks = (0 until nChunks) map {i =>
hist(min((i+1)*l, histLength)-1, i*l)
}
hist_chunks.reduce(_^_)
}
// Index = low PC bits XOR folded history; tag = next PC bits XOR a
// differently-folded history, so index and tag decorrelate.
def compute_tag_and_hash(unhashed_idx: UInt, hist: UInt) = {
val idx_history = compute_folded_hist(hist, log2Ceil(nRows))
val idx = (unhashed_idx ^ idx_history)(log2Ceil(nRows)-1,0)
val tag_history = compute_folded_hist(hist, tagSz)
val tag = ((unhashed_idx >> log2Ceil(nRows)) ^ tag_history)(tagSz-1,0)
(idx, tag)
}
// 3-bit saturating increment/decrement (clamps at 0 and 7).
def inc_ctr(ctr: UInt, taken: Bool): UInt = {
Mux(!taken, Mux(ctr === 0.U, 0.U, ctr - 1.U),
Mux(ctr === 7.U, 7.U, ctr + 1.U))
}
// Sweep every row once after reset, zeroing the table.
val doing_reset = RegInit(true.B)
val reset_idx = RegInit(0.U(log2Ceil(nRows).W))
reset_idx := reset_idx + doing_reset
when (reset_idx === (nRows-1).U) { doing_reset := false.B }
class TageEntry extends Bundle {
val valid = Bool() // TODO: Remove this valid bit
val tag = UInt(tagSz.W)
val ctr = UInt(3.W)
}
val tageEntrySz = 1 + tagSz + 3
val (s1_hashed_idx, s1_tag) = compute_tag_and_hash(fetchIdx(io.f1_req_pc), io.f1_req_ghist)
// u bits are stored in two 1-bit banks so hi/lo can be cleared separately.
val hi_us = SyncReadMem(nRows, Vec(bankWidth, Bool()))
val lo_us = SyncReadMem(nRows, Vec(bankWidth, Bool()))
val table = SyncReadMem(nRows, Vec(bankWidth, UInt(tageEntrySz.W)))
val mems = Seq((f"tage_l$histLength", nRows, bankWidth * tageEntrySz))
val s2_tag = RegNext(s1_tag)
// s2: synchronous read results; hit requires valid && tag match && !reset.
val s2_req_rtage = VecInit(table.read(s1_hashed_idx, io.f1_req_valid).map(_.asTypeOf(new TageEntry)))
val s2_req_rhius = hi_us.read(s1_hashed_idx, io.f1_req_valid)
val s2_req_rlous = lo_us.read(s1_hashed_idx, io.f1_req_valid)
val s2_req_rhits = VecInit(s2_req_rtage.map(e => e.valid && e.tag === s2_tag && !doing_reset))
for (w <- 0 until bankWidth) {
// This bit indicates the TAGE table matched here
io.f3_resp(w).valid := RegNext(s2_req_rhits(w))
io.f3_resp(w).bits.u := RegNext(Cat(s2_req_rhius(w), s2_req_rlous(w)))
io.f3_resp(w).bits.ctr := RegNext(s2_req_rtage(w).ctr)
}
// Free-running counter that schedules the periodic u-bit clearing sweep;
// the top bit selects whether the hi or lo u bank is being cleared.
val clear_u_ctr = RegInit(0.U((log2Ceil(uBitPeriod) + log2Ceil(nRows) + 1).W))
when (doing_reset) { clear_u_ctr := 1.U } .otherwise { clear_u_ctr := clear_u_ctr + 1.U }
val doing_clear_u = clear_u_ctr(log2Ceil(uBitPeriod)-1,0) === 0.U
val doing_clear_u_hi = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 1.U
val doing_clear_u_lo = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 0.U
val clear_u_idx = clear_u_ctr >> log2Ceil(uBitPeriod)
val (update_idx, update_tag) = compute_tag_and_hash(fetchIdx(io.update_pc), io.update_hist)
val update_wdata = Wire(Vec(bankWidth, new TageEntry))
// Reset sweep takes priority over predictor updates on the main table.
table.write(
Mux(doing_reset, reset_idx , update_idx),
Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 0.U(tageEntrySz.W) }), VecInit(update_wdata.map(_.asUInt))),
Mux(doing_reset, ~(0.U(bankWidth.W)) , io.update_mask.asUInt).asBools
)
val update_hi_wdata = Wire(Vec(bankWidth, Bool()))
// u banks: reset sweep > clear sweep > update, with full mask for sweeps.
hi_us.write(
Mux(doing_reset, reset_idx, Mux(doing_clear_u_hi, clear_u_idx, update_idx)),
Mux(doing_reset || doing_clear_u_hi, VecInit((0.U(bankWidth.W)).asBools), update_hi_wdata),
Mux(doing_reset || doing_clear_u_hi, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools
)
val update_lo_wdata = Wire(Vec(bankWidth, Bool()))
lo_us.write(
Mux(doing_reset, reset_idx, Mux(doing_clear_u_lo, clear_u_idx, update_idx)),
Mux(doing_reset || doing_clear_u_lo, VecInit((0.U(bankWidth.W)).asBools), update_lo_wdata),
Mux(doing_reset || doing_clear_u_lo, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools
)
// Small write bypass: remembers the ctrs of the last few updates so
// back-to-back updates to the same (idx, tag) see fresh counter values.
val wrbypass_tags = Reg(Vec(nWrBypassEntries, UInt(tagSz.W)))
val wrbypass_idxs = Reg(Vec(nWrBypassEntries, UInt(log2Ceil(nRows).W)))
val wrbypass = Reg(Vec(nWrBypassEntries, Vec(bankWidth, UInt(3.W))))
val wrbypass_enq_idx = RegInit(0.U(log2Ceil(nWrBypassEntries).W))
val wrbypass_hits = VecInit((0 until nWrBypassEntries) map { i =>
!doing_reset &&
wrbypass_tags(i) === update_tag &&
wrbypass_idxs(i) === update_idx
})
val wrbypass_hit = wrbypass_hits.reduce(_||_)
val wrbypass_hit_idx = PriorityEncoder(wrbypass_hits)
for (w <- 0 until bankWidth) {
// Fresh allocations start weakly taken (4) or weakly not-taken (3);
// otherwise bump the bypassed or supplied old counter.
update_wdata(w).ctr := Mux(io.update_alloc(w),
Mux(io.update_taken(w), 4.U,
3.U
),
Mux(wrbypass_hit, inc_ctr(wrbypass(wrbypass_hit_idx)(w), io.update_taken(w)),
inc_ctr(io.update_old_ctr(w), io.update_taken(w))
)
)
update_wdata(w).valid := true.B
update_wdata(w).tag := update_tag
update_hi_wdata(w) := io.update_u(w)(1)
update_lo_wdata(w) := io.update_u(w)(0)
}
// Record this update's counters in the bypass (replace on hit, else enqueue).
when (io.update_mask.reduce(_||_)) {
when (wrbypass_hits.reduce(_||_)) {
wrbypass(wrbypass_hit_idx) := VecInit(update_wdata.map(_.ctr))
} .otherwise {
wrbypass (wrbypass_enq_idx) := VecInit(update_wdata.map(_.ctr))
wrbypass_tags(wrbypass_enq_idx) := update_tag
wrbypass_idxs(wrbypass_enq_idx) := update_idx
wrbypass_enq_idx := WrapInc(wrbypass_enq_idx, nWrBypassEntries)
}
}
}
// Configuration for the TAGE predictor bank: one (nSets, histLen, tagSz)
// triple per component table (history lengths form a geometric series), and
// the period (in cycles) of the usefulness-bit clearing sweep.
case class BoomTageParams(
// nSets, histLen, tagSz
tableInfo: Seq[Tuple3[Int, Int, Int]] = Seq(( 128, 2, 7),
( 128, 4, 7),
( 256, 8, 8),
( 256, 16, 8),
( 128, 32, 9),
( 128, 64, 9)),
uBitPeriod: Int = 2048
)
// TAGE predictor bank: instantiates one TageTable per tableInfo entry and
// combines their responses. The longest-history hitting table ("provider")
// supplies the prediction; the next-longest hit (or the incoming base
// prediction) is the alternate. Update logic trains the provider at commit
// and allocates a longer-history entry on mispredict.
class TageBranchPredictorBank(params: BoomTageParams = BoomTageParams())(implicit p: Parameters) extends BranchPredictorBank()(p)
{
val tageUBitPeriod = params.uBitPeriod
val tageNTables = params.tableInfo.size
// Prediction metadata carried to update time (packed into io.f3_meta).
class TageMeta extends Bundle
{
val provider = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
val alt_differs = Vec(bankWidth, Output(Bool()))
val provider_u = Vec(bankWidth, Output(UInt(2.W)))
val provider_ctr = Vec(bankWidth, Output(UInt(3.W)))
val allocate = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
}
val f3_meta = Wire(new TageMeta)
override val metaSz = f3_meta.asUInt.getWidth
require(metaSz <= bpdMaxMetaLength)
// Usefulness update: unchanged if alt agrees; else decrement on mispredict,
// increment on correct predict (saturating at 0 and 3).
def inc_u(u: UInt, alt_differs: Bool, mispredict: Bool): UInt = {
Mux(!alt_differs, u,
Mux(mispredict, Mux(u === 0.U, 0.U, u - 1.U),
Mux(u === 3.U, 3.U, u + 1.U)))
}
// Build each component table and wire its f1 request.
val tt = params.tableInfo map {
case (n, l, s) => {
val t = Module(new TageTable(n, s, l, params.uBitPeriod))
t.io.f1_req_valid := RegNext(io.f0_valid)
t.io.f1_req_pc := RegNext(io.f0_pc)
t.io.f1_req_ghist := io.f1_ghist
(t, t.mems)
}
}
val tables = tt.map(_._1)
val mems = tt.map(_._2).flatten
val f3_resps = VecInit(tables.map(_.io.f3_resp))
val s1_update_meta = s1_update.bits.meta.asTypeOf(new TageMeta)
val s1_update_mispredict_mask = UIntToOH(s1_update.bits.cfi_idx.bits) &
Fill(bankWidth, s1_update.bits.cfi_mispredicted)
val s1_update_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, Bool()))))
val s1_update_u_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, UInt(1.W)))))
val s1_update_taken = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
val s1_update_old_ctr = Wire(Vec(tageNTables, Vec(bankWidth, UInt(3.W))))
val s1_update_alloc = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
val s1_update_u = Wire(Vec(tageNTables, Vec(bankWidth, UInt(2.W))))
s1_update_taken := DontCare
s1_update_old_ctr := DontCare
s1_update_alloc := DontCare
s1_update_u := DontCare
for (w <- 0 until bankWidth) {
// Scala-level vars thread the elaboration-time fold over tables: after
// the loop, `provider` is the longest-history hitting table and
// `altpred` the prediction that table would have overridden.
var altpred = io.resp_in(0).f3(w).taken
val final_altpred = WireInit(io.resp_in(0).f3(w).taken)
var provided = false.B
var provider = 0.U
io.resp.f3(w).taken := io.resp_in(0).f3(w).taken
for (i <- 0 until tageNTables) {
val hit = f3_resps(i)(w).valid
val ctr = f3_resps(i)(w).bits.ctr
when (hit) {
// Weak counters (3/4) defer to the alternate prediction.
io.resp.f3(w).taken := Mux(ctr === 3.U || ctr === 4.U, altpred, ctr(2))
final_altpred := altpred
}
provided = provided || hit
provider = Mux(hit, i.U, provider)
altpred = Mux(hit, f3_resps(i)(w).bits.ctr(2), altpred)
}
f3_meta.provider(w).valid := provided
f3_meta.provider(w).bits := provider
f3_meta.alt_differs(w) := final_altpred =/= io.resp.f3(w).taken
f3_meta.provider_u(w) := f3_resps(provider)(w).bits.u
f3_meta.provider_ctr(w) := f3_resps(provider)(w).bits.ctr
// Create a mask of tables which did not hit our query, and also contain useless entries
// and also uses a longer history than the provider
val allocatable_slots = (
VecInit(f3_resps.map(r => !r(w).valid && r(w).bits.u === 0.U)).asUInt &
~(MaskLower(UIntToOH(provider)) & Fill(tageNTables, provided))
)
// Randomize the chosen allocation slot with an LFSR to avoid pathological
// ping-ponging between tables.
val alloc_lfsr = random.LFSR(tageNTables max 2)
val first_entry = PriorityEncoder(allocatable_slots)
val masked_entry = PriorityEncoder(allocatable_slots & alloc_lfsr)
val alloc_entry = Mux(allocatable_slots(masked_entry),
masked_entry,
first_entry)
f3_meta.allocate(w).valid := allocatable_slots =/= 0.U
f3_meta.allocate(w).bits := alloc_entry
val update_was_taken = (s1_update.bits.cfi_idx.valid &&
(s1_update.bits.cfi_idx.bits === w.U) &&
s1_update.bits.cfi_taken)
// Commit-time training of the provider table for this branch slot.
when (s1_update.bits.br_mask(w) && s1_update.valid && s1_update.bits.is_commit_update) {
when (s1_update_meta.provider(w).valid) {
val provider = s1_update_meta.provider(w).bits
s1_update_mask(provider)(w) := true.B
s1_update_u_mask(provider)(w) := true.B
val new_u = inc_u(s1_update_meta.provider_u(w),
s1_update_meta.alt_differs(w),
s1_update_mispredict_mask(w))
s1_update_u (provider)(w) := new_u
s1_update_taken (provider)(w) := update_was_taken
s1_update_old_ctr(provider)(w) := s1_update_meta.provider_ctr(w)
s1_update_alloc (provider)(w) := false.B
}
}
}
// On a committed mispredict, allocate in the pre-selected table; if none
// was allocatable, decay the u bits of all longer-history tables instead.
when (s1_update.valid && s1_update.bits.is_commit_update && s1_update.bits.cfi_mispredicted && s1_update.bits.cfi_idx.valid) {
val idx = s1_update.bits.cfi_idx.bits
val allocate = s1_update_meta.allocate(idx)
when (allocate.valid) {
s1_update_mask (allocate.bits)(idx) := true.B
s1_update_taken(allocate.bits)(idx) := s1_update.bits.cfi_taken
s1_update_alloc(allocate.bits)(idx) := true.B
s1_update_u_mask(allocate.bits)(idx) := true.B
s1_update_u (allocate.bits)(idx) := 0.U
} .otherwise {
val provider = s1_update_meta.provider(idx)
val decr_mask = Mux(provider.valid, ~MaskLower(UIntToOH(provider.bits)), 0.U)
for (i <- 0 until tageNTables) {
when (decr_mask(i)) {
s1_update_u_mask(i)(idx) := true.B
s1_update_u (i)(idx) := 0.U
}
}
}
}
// Pipeline the update signals one cycle into each table.
for (i <- 0 until tageNTables) {
for (w <- 0 until bankWidth) {
tables(i).io.update_mask(w) := RegNext(s1_update_mask(i)(w))
tables(i).io.update_taken(w) := RegNext(s1_update_taken(i)(w))
tables(i).io.update_alloc(w) := RegNext(s1_update_alloc(i)(w))
tables(i).io.update_old_ctr(w) := RegNext(s1_update_old_ctr(i)(w))
tables(i).io.update_u_mask(w) := RegNext(s1_update_u_mask(i)(w))
tables(i).io.update_u(w) := RegNext(s1_update_u(i)(w))
}
tables(i).io.update_pc := RegNext(s1_update.bits.pc)
tables(i).io.update_hist := RegNext(s1_update.bits.ghist)
}
//io.f3_meta := Cat(f3_meta.asUInt, micro.io.f3_meta(micro.metaSz-1,0), base.io.f3_meta(base.metaSz-1, 0))
io.f3_meta := f3_meta.asUInt
} | module TageTable(
// Generated RTL for one 128-row, 7-bit-tag TAGE table (bankWidth = 4).
// f1: hashed lookup; f3: registered hit/ctr/u per slot; update port trains
// counters and u bits. Do not hand-edit logic — regenerate from the Chisel.
input clock,
input reset,
input io_f1_req_valid,
input [39:0] io_f1_req_pc,
input [63:0] io_f1_req_ghist,
output io_f3_resp_0_valid,
output [2:0] io_f3_resp_0_bits_ctr,
output [1:0] io_f3_resp_0_bits_u,
output io_f3_resp_1_valid,
output [2:0] io_f3_resp_1_bits_ctr,
output [1:0] io_f3_resp_1_bits_u,
output io_f3_resp_2_valid,
output [2:0] io_f3_resp_2_bits_ctr,
output [1:0] io_f3_resp_2_bits_u,
output io_f3_resp_3_valid,
output [2:0] io_f3_resp_3_bits_ctr,
output [1:0] io_f3_resp_3_bits_u,
input io_update_mask_0,
input io_update_mask_1,
input io_update_mask_2,
input io_update_mask_3,
input io_update_taken_0,
input io_update_taken_1,
input io_update_taken_2,
input io_update_taken_3,
input io_update_alloc_0,
input io_update_alloc_1,
input io_update_alloc_2,
input io_update_alloc_3,
input [2:0] io_update_old_ctr_0,
input [2:0] io_update_old_ctr_1,
input [2:0] io_update_old_ctr_2,
input [2:0] io_update_old_ctr_3,
input [39:0] io_update_pc,
input [63:0] io_update_hist,
input io_update_u_mask_0,
input io_update_u_mask_1,
input io_update_u_mask_2,
input io_update_u_mask_3,
input [1:0] io_update_u_0,
input [1:0] io_update_u_1,
input [1:0] io_update_u_2,
input [1:0] io_update_u_3
);
wire update_lo_wdata_3;
wire update_hi_wdata_3;
wire [2:0] update_wdata_3_ctr;
wire update_lo_wdata_2;
wire update_hi_wdata_2;
wire [2:0] update_wdata_2_ctr;
wire update_lo_wdata_1;
wire update_hi_wdata_1;
wire [2:0] update_wdata_1_ctr;
wire update_lo_wdata_0;
wire update_hi_wdata_0;
wire [2:0] update_wdata_0_ctr;
wire lo_us_MPORT_2_data_3;
wire lo_us_MPORT_2_data_2;
wire lo_us_MPORT_2_data_1;
wire lo_us_MPORT_2_data_0;
wire hi_us_MPORT_1_data_3;
wire hi_us_MPORT_1_data_2;
wire hi_us_MPORT_1_data_1;
wire hi_us_MPORT_1_data_0;
wire [10:0] table_MPORT_data_3;
wire [10:0] table_MPORT_data_2;
wire [10:0] table_MPORT_data_1;
wire [10:0] table_MPORT_data_0;
wire [43:0] _table_R0_data;
wire [3:0] _lo_us_R0_data;
wire [3:0] _hi_us_R0_data;
reg doing_reset;
reg [6:0] reset_idx;
// s1 index: PC bits XORed with folded global history.
wire [6:0] s1_hashed_idx = {io_f1_req_pc[9:5], io_f1_req_pc[4:3] ^ io_f1_req_ghist[1:0]};
reg [6:0] s2_tag;
reg io_f3_resp_0_valid_REG;
reg [1:0] io_f3_resp_0_bits_u_REG;
reg [2:0] io_f3_resp_0_bits_ctr_REG;
reg io_f3_resp_1_valid_REG;
reg [1:0] io_f3_resp_1_bits_u_REG;
reg [2:0] io_f3_resp_1_bits_ctr_REG;
reg io_f3_resp_2_valid_REG;
reg [1:0] io_f3_resp_2_bits_u_REG;
reg [2:0] io_f3_resp_2_bits_ctr_REG;
reg io_f3_resp_3_valid_REG;
reg [1:0] io_f3_resp_3_bits_u_REG;
reg [2:0] io_f3_resp_3_bits_ctr_REG;
// Periodic u-bit clear: low 11 bits give the period, bit 18 selects hi/lo bank.
reg [18:0] clear_u_ctr;
wire doing_clear_u = clear_u_ctr[10:0] == 11'h0;
wire doing_clear_u_hi = doing_clear_u & clear_u_ctr[18];
wire doing_clear_u_lo = doing_clear_u & ~(clear_u_ctr[18]);
wire [1:0] _GEN = io_update_pc[4:3] ^ io_update_hist[1:0];
wire [6:0] update_idx = {io_update_pc[9:5], _GEN};
wire [1:0] _GEN_0 = io_update_pc[11:10] ^ io_update_hist[1:0];
wire [6:0] update_tag = {io_update_pc[16:12], _GEN_0};
// Table write data: zero during the reset sweep, else {valid, tag, ctr}.
assign table_MPORT_data_0 = doing_reset ? 11'h0 : {1'h1, io_update_pc[16:12], _GEN_0, update_wdata_0_ctr};
assign table_MPORT_data_1 = doing_reset ? 11'h0 : {1'h1, io_update_pc[16:12], _GEN_0, update_wdata_1_ctr};
assign table_MPORT_data_2 = doing_reset ? 11'h0 : {1'h1, io_update_pc[16:12], _GEN_0, update_wdata_2_ctr};
assign table_MPORT_data_3 = doing_reset ? 11'h0 : {1'h1, io_update_pc[16:12], _GEN_0, update_wdata_3_ctr};
wire [6:0] _GEN_1 = {io_update_pc[9:5], _GEN};
wire _GEN_2 = doing_reset | doing_clear_u_hi;
assign hi_us_MPORT_1_data_0 = ~_GEN_2 & update_hi_wdata_0;
assign hi_us_MPORT_1_data_1 = ~_GEN_2 & update_hi_wdata_1;
assign hi_us_MPORT_1_data_2 = ~_GEN_2 & update_hi_wdata_2;
assign hi_us_MPORT_1_data_3 = ~_GEN_2 & update_hi_wdata_3;
wire [3:0] _GEN_3 = {io_update_u_mask_3, io_update_u_mask_2, io_update_u_mask_1, io_update_u_mask_0};
wire _GEN_4 = doing_reset | doing_clear_u_lo;
assign lo_us_MPORT_2_data_0 = ~_GEN_4 & update_lo_wdata_0;
assign lo_us_MPORT_2_data_1 = ~_GEN_4 & update_lo_wdata_1;
assign lo_us_MPORT_2_data_2 = ~_GEN_4 & update_lo_wdata_2;
assign lo_us_MPORT_2_data_3 = ~_GEN_4 & update_lo_wdata_3;
// Two-entry write bypass over (tag, idx) holding the last written ctrs.
reg [6:0] wrbypass_tags_0;
reg [6:0] wrbypass_tags_1;
reg [6:0] wrbypass_idxs_0;
reg [6:0] wrbypass_idxs_1;
reg [2:0] wrbypass_0_0;
reg [2:0] wrbypass_0_1;
reg [2:0] wrbypass_0_2;
reg [2:0] wrbypass_0_3;
reg [2:0] wrbypass_1_0;
reg [2:0] wrbypass_1_1;
reg [2:0] wrbypass_1_2;
reg [2:0] wrbypass_1_3;
reg wrbypass_enq_idx;
wire wrbypass_hits_0 = ~doing_reset & wrbypass_tags_0 == update_tag & wrbypass_idxs_0 == update_idx;
wire wrbypass_hit = wrbypass_hits_0 | ~doing_reset & wrbypass_tags_1 == update_tag & wrbypass_idxs_1 == update_idx;
wire [2:0] _GEN_5 = wrbypass_hits_0 ? wrbypass_0_0 : wrbypass_1_0;
wire [2:0] _GEN_6 = wrbypass_hits_0 ? wrbypass_0_1 : wrbypass_1_1;
wire [2:0] _GEN_7 = wrbypass_hits_0 ? wrbypass_0_2 : wrbypass_1_2;
wire [2:0] _GEN_8 = wrbypass_hits_0 ? wrbypass_0_3 : wrbypass_1_3;
// New ctr per slot: alloc -> weak 4/3; else saturating inc/dec of the
// bypassed (or supplied) old counter.
assign update_wdata_0_ctr = io_update_alloc_0 ? (io_update_taken_0 ? 3'h4 : 3'h3) : wrbypass_hit ? (io_update_taken_0 ? ((&_GEN_5) ? 3'h7 : _GEN_5 + 3'h1) : _GEN_5 == 3'h0 ? 3'h0 : _GEN_5 - 3'h1) : io_update_taken_0 ? ((&io_update_old_ctr_0) ? 3'h7 : io_update_old_ctr_0 + 3'h1) : io_update_old_ctr_0 == 3'h0 ? 3'h0 : io_update_old_ctr_0 - 3'h1;
assign update_hi_wdata_0 = io_update_u_0[1];
assign update_lo_wdata_0 = io_update_u_0[0];
assign update_wdata_1_ctr = io_update_alloc_1 ? (io_update_taken_1 ? 3'h4 : 3'h3) : wrbypass_hit ? (io_update_taken_1 ? ((&_GEN_6) ? 3'h7 : _GEN_6 + 3'h1) : _GEN_6 == 3'h0 ? 3'h0 : _GEN_6 - 3'h1) : io_update_taken_1 ? ((&io_update_old_ctr_1) ? 3'h7 : io_update_old_ctr_1 + 3'h1) : io_update_old_ctr_1 == 3'h0 ? 3'h0 : io_update_old_ctr_1 - 3'h1;
assign update_hi_wdata_1 = io_update_u_1[1];
assign update_lo_wdata_1 = io_update_u_1[0];
assign update_wdata_2_ctr = io_update_alloc_2 ? (io_update_taken_2 ? 3'h4 : 3'h3) : wrbypass_hit ? (io_update_taken_2 ? ((&_GEN_7) ? 3'h7 : _GEN_7 + 3'h1) : _GEN_7 == 3'h0 ? 3'h0 : _GEN_7 - 3'h1) : io_update_taken_2 ? ((&io_update_old_ctr_2) ? 3'h7 : io_update_old_ctr_2 + 3'h1) : io_update_old_ctr_2 == 3'h0 ? 3'h0 : io_update_old_ctr_2 - 3'h1;
assign update_hi_wdata_2 = io_update_u_2[1];
assign update_lo_wdata_2 = io_update_u_2[0];
assign update_wdata_3_ctr = io_update_alloc_3 ? (io_update_taken_3 ? 3'h4 : 3'h3) : wrbypass_hit ? (io_update_taken_3 ? ((&_GEN_8) ? 3'h7 : _GEN_8 + 3'h1) : _GEN_8 == 3'h0 ? 3'h0 : _GEN_8 - 3'h1) : io_update_taken_3 ? ((&io_update_old_ctr_3) ? 3'h7 : io_update_old_ctr_3 + 3'h1) : io_update_old_ctr_3 == 3'h0 ? 3'h0 : io_update_old_ctr_3 - 3'h1;
assign update_hi_wdata_3 = io_update_u_3[1];
assign update_lo_wdata_3 = io_update_u_3[0];
wire _GEN_9 = io_update_mask_0 | io_update_mask_1 | io_update_mask_2 | io_update_mask_3;
wire _GEN_10 = ~_GEN_9 | wrbypass_hit | wrbypass_enq_idx;
wire _GEN_11 = ~_GEN_9 | wrbypass_hit | ~wrbypass_enq_idx;
// Sequential state: reset sweep, clear-u counter, f3 response pipeline
// registers, and write-bypass bookkeeping.
always @(posedge clock) begin
if (reset) begin
doing_reset <= 1'h1;
reset_idx <= 7'h0;
clear_u_ctr <= 19'h0;
wrbypass_enq_idx <= 1'h0;
end
else begin
doing_reset <= reset_idx != 7'h7F & doing_reset;
reset_idx <= reset_idx + {6'h0, doing_reset};
clear_u_ctr <= doing_reset ? 19'h1 : clear_u_ctr + 19'h1;
if (~_GEN_9 | wrbypass_hit) begin
end
else
wrbypass_enq_idx <= wrbypass_enq_idx - 1'h1;
end
s2_tag <= {io_f1_req_pc[16:12], io_f1_req_pc[11:10] ^ io_f1_req_ghist[1:0]};
io_f3_resp_0_valid_REG <= _table_R0_data[10] & _table_R0_data[9:3] == s2_tag & ~doing_reset;
io_f3_resp_0_bits_u_REG <= {_hi_us_R0_data[0], _lo_us_R0_data[0]};
io_f3_resp_0_bits_ctr_REG <= _table_R0_data[2:0];
io_f3_resp_1_valid_REG <= _table_R0_data[21] & _table_R0_data[20:14] == s2_tag & ~doing_reset;
io_f3_resp_1_bits_u_REG <= {_hi_us_R0_data[1], _lo_us_R0_data[1]};
io_f3_resp_1_bits_ctr_REG <= _table_R0_data[13:11];
io_f3_resp_2_valid_REG <= _table_R0_data[32] & _table_R0_data[31:25] == s2_tag & ~doing_reset;
io_f3_resp_2_bits_u_REG <= {_hi_us_R0_data[2], _lo_us_R0_data[2]};
io_f3_resp_2_bits_ctr_REG <= _table_R0_data[24:22];
io_f3_resp_3_valid_REG <= _table_R0_data[43] & _table_R0_data[42:36] == s2_tag & ~doing_reset;
io_f3_resp_3_bits_u_REG <= {_hi_us_R0_data[3], _lo_us_R0_data[3]};
io_f3_resp_3_bits_ctr_REG <= _table_R0_data[35:33];
if (_GEN_10) begin
end
else
wrbypass_tags_0 <= update_tag;
if (_GEN_11) begin
end
else
wrbypass_tags_1 <= update_tag;
if (_GEN_10) begin
end
else
wrbypass_idxs_0 <= update_idx;
if (_GEN_11) begin
end
else
wrbypass_idxs_1 <= update_idx;
if (_GEN_9) begin
if (wrbypass_hit) begin
if (wrbypass_hits_0) begin
wrbypass_0_0 <= update_wdata_0_ctr;
wrbypass_0_1 <= update_wdata_1_ctr;
wrbypass_0_2 <= update_wdata_2_ctr;
wrbypass_0_3 <= update_wdata_3_ctr;
end
else begin
wrbypass_1_0 <= update_wdata_0_ctr;
wrbypass_1_1 <= update_wdata_1_ctr;
wrbypass_1_2 <= update_wdata_2_ctr;
wrbypass_1_3 <= update_wdata_3_ctr;
end
end
else if (wrbypass_enq_idx) begin
wrbypass_1_0 <= update_wdata_0_ctr;
wrbypass_1_1 <= update_wdata_1_ctr;
wrbypass_1_2 <= update_wdata_2_ctr;
wrbypass_1_3 <= update_wdata_3_ctr;
end
else begin
wrbypass_0_0 <= update_wdata_0_ctr;
wrbypass_0_1 <= update_wdata_1_ctr;
wrbypass_0_2 <= update_wdata_2_ctr;
wrbypass_0_3 <= update_wdata_3_ctr;
end
end
end
// Memory macros: hi/lo u-bit banks and the main tag/ctr table. Write
// address priority is reset sweep > clear sweep > update.
hi_us hi_us (
.R0_addr (s1_hashed_idx),
.R0_en (io_f1_req_valid),
.R0_clk (clock),
.R0_data (_hi_us_R0_data),
.W0_addr (doing_reset ? reset_idx : doing_clear_u_hi ? clear_u_ctr[17:11] : _GEN_1),
.W0_clk (clock),
.W0_data ({hi_us_MPORT_1_data_3, hi_us_MPORT_1_data_2, hi_us_MPORT_1_data_1, hi_us_MPORT_1_data_0}),
.W0_mask (_GEN_2 ? 4'hF : _GEN_3)
);
lo_us lo_us (
.R0_addr (s1_hashed_idx),
.R0_en (io_f1_req_valid),
.R0_clk (clock),
.R0_data (_lo_us_R0_data),
.W0_addr (doing_reset ? reset_idx : doing_clear_u_lo ? clear_u_ctr[17:11] : _GEN_1),
.W0_clk (clock),
.W0_data ({lo_us_MPORT_2_data_3, lo_us_MPORT_2_data_2, lo_us_MPORT_2_data_1, lo_us_MPORT_2_data_0}),
.W0_mask (_GEN_4 ? 4'hF : _GEN_3)
);
table_0 table_0 (
.R0_addr (s1_hashed_idx),
.R0_en (io_f1_req_valid),
.R0_clk (clock),
.R0_data (_table_R0_data),
.W0_addr (doing_reset ? reset_idx : update_idx),
.W0_clk (clock),
.W0_data ({table_MPORT_data_3, table_MPORT_data_2, table_MPORT_data_1, table_MPORT_data_0}),
.W0_mask (doing_reset ? 4'hF : {io_update_mask_3, io_update_mask_2, io_update_mask_1, io_update_mask_0})
);
assign io_f3_resp_0_valid = io_f3_resp_0_valid_REG;
assign io_f3_resp_0_bits_ctr = io_f3_resp_0_bits_ctr_REG;
assign io_f3_resp_0_bits_u = io_f3_resp_0_bits_u_REG;
assign io_f3_resp_1_valid = io_f3_resp_1_valid_REG;
assign io_f3_resp_1_bits_ctr = io_f3_resp_1_bits_ctr_REG;
assign io_f3_resp_1_bits_u = io_f3_resp_1_bits_u_REG;
assign io_f3_resp_2_valid = io_f3_resp_2_valid_REG;
assign io_f3_resp_2_bits_ctr = io_f3_resp_2_bits_ctr_REG;
assign io_f3_resp_2_bits_u = io_f3_resp_2_bits_u_REG;
assign io_f3_resp_3_valid = io_f3_resp_3_valid_REG;
assign io_f3_resp_3_bits_ctr = io_f3_resp_3_bits_ctr_REG;
assign io_f3_resp_3_bits_u = io_f3_resp_3_bits_u_REG;
endmodule |
Generate the Verilog code corresponding to this Chisel code /*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import freechips.rocketchip.tilelink._
import TLPermissions._
import TLMessages._
import MetaData._
import chisel3.PrintableHelper
import chisel3.experimental.dataview._
// One cycle's worth of work the MSHR asks the scheduler to perform: at most
// one message per TileLink channel (A–E), a flush response (X), and a
// directory write. Field order defines the bundle's bit layout.
class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val a = Valid(new SourceARequest(params))
val b = Valid(new SourceBRequest(params))
val c = Valid(new SourceCRequest(params))
val d = Valid(new SourceDRequest(params))
val e = Valid(new SourceERequest(params))
val x = Valid(new SourceXRequest(params))
val dir = Valid(new DirectoryWrite(params))
val reload = Bool() // get next request via allocate (if any)
}
// Status the MSHR exposes to the scheduler: which set/tag/way it occupies and
// whether incoming B (probe) / C (release) traffic must be blocked or may be
// nested inside this transaction.
class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val way = UInt(params.wayBits.W)
val blockB = Bool()
val nestB = Bool()
val blockC = Bool()
val nestC = Bool()
}
// Notification that a nested transaction touched a (set, tag); a live MSHR
// matching it patches its cached meta-data accordingly.
class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val b_toN = Bool() // nested Probes may unhit us
val b_toB = Bool() // nested Probes may demote us
val b_clr_dirty = Bool() // nested Probes clear dirty
val c_set_dirty = Bool() // nested Releases MAY set dirty
}
// Enumeration of cache-line states. Each case object captures the value of
// CacheState.index at construction time as its hardware `code`, then bumps
// the counter — so codes are assigned by declaration order below, and the
// ordering of these case objects is semantically significant.
sealed trait CacheState
{
val code = CacheState.index.U
CacheState.index = CacheState.index + 1
}
object CacheState
{
// Mutable elaboration-time counter backing the code assignment above.
var index = 0
}
case object S_INVALID extends CacheState
case object S_BRANCH extends CacheState
case object S_BRANCH_C extends CacheState
case object S_TIP extends CacheState
case object S_TIP_C extends CacheState
case object S_TIP_CD extends CacheState
case object S_TIP_D extends CacheState
case object S_TRUNK_C extends CacheState
case object S_TRUNK_CD extends CacheState
class MSHR(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle
val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup
val status = Valid(new MSHRStatus(params))
val schedule = Decoupled(new ScheduleRequest(params))
val sinkc = Flipped(Valid(new SinkCResponse(params)))
val sinkd = Flipped(Valid(new SinkDResponse(params)))
val sinke = Flipped(Valid(new SinkEResponse(params)))
val nestedwb = Flipped(new NestedWriteback(params))
})
val request_valid = RegInit(false.B)
val request = Reg(new FullRequest(params))
val meta_valid = RegInit(false.B)
val meta = Reg(new DirectoryResult(params))
// Define which states are valid
when (meta_valid) {
when (meta.state === INVALID) {
assert (!meta.clients.orR)
assert (!meta.dirty)
}
when (meta.state === BRANCH) {
assert (!meta.dirty)
}
when (meta.state === TRUNK) {
assert (meta.clients.orR)
assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one
}
when (meta.state === TIP) {
// noop
}
}
// Completed transitions (s_ = scheduled), (w_ = waiting)
val s_rprobe = RegInit(true.B) // B
val w_rprobeackfirst = RegInit(true.B)
val w_rprobeacklast = RegInit(true.B)
val s_release = RegInit(true.B) // CW w_rprobeackfirst
val w_releaseack = RegInit(true.B)
val s_pprobe = RegInit(true.B) // B
val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1]
val s_flush = RegInit(true.B) // X w_releaseack
val w_grantfirst = RegInit(true.B)
val w_grantlast = RegInit(true.B)
val w_grant = RegInit(true.B) // first | last depending on wormhole
val w_pprobeackfirst = RegInit(true.B)
val w_pprobeacklast = RegInit(true.B)
val w_pprobeack = RegInit(true.B) // first | last depending on wormhole
val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*)
val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD
val s_execute = RegInit(true.B) // D w_pprobeack, w_grant
val w_grantack = RegInit(true.B)
val s_writeback = RegInit(true.B) // W w_*
// [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall)
// However, inB and outC are higher priority than outB, so s_release and s_pprobe
// may be safely issued while blockB. Thus we must NOT try to schedule the
// potentially stuck s_acquire with either of them (scheduler is all or none).
// Meta-data that we discover underway
val sink = Reg(UInt(params.outer.bundle.sinkBits.W))
val gotT = Reg(Bool())
val bad_grant = Reg(Bool())
val probes_done = Reg(UInt(params.clientBits.W))
val probes_toN = Reg(UInt(params.clientBits.W))
val probes_noT = Reg(Bool())
// When a nested transaction completes, update our meta data
when (meta_valid && meta.state =/= INVALID &&
io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) {
when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B }
when (io.nestedwb.c_set_dirty) { meta.dirty := true.B }
when (io.nestedwb.b_toB) { meta.state := BRANCH }
when (io.nestedwb.b_toN) { meta.hit := false.B }
}
// Scheduler status
io.status.valid := request_valid
io.status.bits.set := request.set
io.status.bits.tag := request.tag
io.status.bits.way := meta.way
io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst)
io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst
// The above rules ensure we will block and not nest an outer probe while still doing our
// own inner probes. Thus every probe wakes exactly one MSHR.
io.status.bits.blockC := !meta_valid
io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst)
// The w_grantfirst in nestC is necessary to deal with:
// acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock
// ... this is possible because the release+probe can be for same set, but different tag
// We can only demand: block, nest, or queue
assert (!io.status.bits.nestB || !io.status.bits.blockB)
assert (!io.status.bits.nestC || !io.status.bits.blockC)
// Scheduler requests
val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack
io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe
io.schedule.bits.b.valid := !s_rprobe || !s_pprobe
io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst)
io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant
io.schedule.bits.e.valid := !s_grantack && w_grantfirst
io.schedule.bits.x.valid := !s_flush && w_releaseack
io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait)
io.schedule.bits.reload := no_wait
io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid ||
io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid ||
io.schedule.bits.dir.valid
// Schedule completions
when (io.schedule.ready) {
s_rprobe := true.B
when (w_rprobeackfirst) { s_release := true.B }
s_pprobe := true.B
when (s_release && s_pprobe) { s_acquire := true.B }
when (w_releaseack) { s_flush := true.B }
when (w_pprobeackfirst) { s_probeack := true.B }
when (w_grantfirst) { s_grantack := true.B }
when (w_pprobeack && w_grant) { s_execute := true.B }
when (no_wait) { s_writeback := true.B }
// Await the next operation
when (no_wait) {
request_valid := false.B
meta_valid := false.B
}
}
// Resulting meta-data
val final_meta_writeback = WireInit(meta)
val req_clientBit = params.clientBit(request.source)
val req_needT = needT(request.opcode, request.param)
val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm
val meta_no_clients = !meta.clients.orR
val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT)
when (request.prio(2) && (!params.firstLevel).B) { // always a hit
final_meta_writeback.dirty := meta.dirty || request.opcode(0)
final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state)
final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U)
final_meta_writeback.hit := true.B // chained requests are hits
} .elsewhen (request.control && params.control.B) { // request.prio(0)
when (meta.hit) {
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := meta.clients & ~probes_toN
}
final_meta_writeback.hit := false.B
} .otherwise {
final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2)
final_meta_writeback.state := Mux(req_needT,
Mux(req_acquire, TRUNK, TIP),
Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH),
MuxLookup(meta.state, 0.U(2.W))(Seq(
INVALID -> BRANCH,
BRANCH -> BRANCH,
TRUNK -> TIP,
TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP)))))
final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) |
Mux(req_acquire, req_clientBit, 0.U)
final_meta_writeback.tag := request.tag
final_meta_writeback.hit := true.B
}
when (bad_grant) {
when (meta.hit) {
// upgrade failed (B -> T)
assert (!meta_valid || meta.state === BRANCH)
final_meta_writeback.hit := true.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := BRANCH
final_meta_writeback.clients := meta.clients & ~probes_toN
} .otherwise {
// failed N -> (T or B)
final_meta_writeback.hit := false.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := 0.U
}
}
val invalid = Wire(new DirectoryEntry(params))
invalid.dirty := false.B
invalid.state := INVALID
invalid.clients := 0.U
invalid.tag := 0.U
// Just because a client says BtoT, by the time we process the request he may be N.
// Therefore, we must consult our own meta-data state to confirm he owns the line still.
val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR
// The client asking us to act is proof they don't have permissions.
val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U)
io.schedule.bits.a.bits.tag := request.tag
io.schedule.bits.a.bits.set := request.set
io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB)
io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U ||
!(request.opcode === PutFullData || request.opcode === AcquirePerm)
io.schedule.bits.a.bits.source := 0.U
io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB)))
io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag)
io.schedule.bits.b.bits.set := request.set
io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client
io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release)
io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN)
io.schedule.bits.c.bits.source := 0.U
io.schedule.bits.c.bits.tag := meta.tag
io.schedule.bits.c.bits.set := request.set
io.schedule.bits.c.bits.way := meta.way
io.schedule.bits.c.bits.dirty := meta.dirty
io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request
io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param,
MuxLookup(request.param, request.param)(Seq(
NtoB -> Mux(req_promoteT, NtoT, NtoB),
BtoT -> Mux(honour_BtoT, BtoT, NtoT),
NtoT -> NtoT)))
io.schedule.bits.d.bits.sink := 0.U
io.schedule.bits.d.bits.way := meta.way
io.schedule.bits.d.bits.bad := bad_grant
io.schedule.bits.e.bits.sink := sink
io.schedule.bits.x.bits.fail := false.B
io.schedule.bits.dir.bits.set := request.set
io.schedule.bits.dir.bits.way := meta.way
io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback))
// Coverage of state transitions
def cacheState(entry: DirectoryEntry, hit: Bool) = {
val out = WireDefault(0.U)
val c = entry.clients.orR
val d = entry.dirty
switch (entry.state) {
is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) }
is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) }
is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) }
is (INVALID) { out := S_INVALID.code }
}
when (!hit) { out := S_INVALID.code }
out
}
val p = !params.lastLevel // can be probed
val c = !params.firstLevel // can be acquired
val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read)
val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist
val f = params.control // flush control register exists
val cfg = (p, c, m, r, f)
val b = r || p // can reach branch state (via probe downgrade or read-only device)
// The cache must be used for something or we would not be here
require(c || m)
val evict = cacheState(meta, !meta.hit)
val before = cacheState(meta, meta.hit)
val after = cacheState(final_meta_writeback, true.B)
def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) {
if (cover) {
params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}")
} else {
assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}")
}
if (cover && f) {
params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}")
} else {
assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}")
}
}
def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) {
if (cover) {
params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}")
} else {
assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}")
}
}
when ((!s_release && w_rprobeackfirst) && io.schedule.ready) {
eviction(S_BRANCH, b) // MMIO read to read-only device
eviction(S_BRANCH_C, b && c) // you need children to become C
eviction(S_TIP, true) // MMIO read || clean release can lead to this state
eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_D, true) // MMIO write || dirty release lead here
eviction(S_TRUNK_C, c) // acquire for write
eviction(S_TRUNK_CD, c) // dirty release then reacquire
}
when ((!s_writeback && no_wait) && io.schedule.ready) {
transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state
transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches
transition(S_INVALID, S_TIP, m) // MMIO read
transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_INVALID, S_TIP_D, m) // MMIO write
transition(S_INVALID, S_TRUNK_C, c) // acquire
transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions)
transition(S_BRANCH, S_BRANCH_C, b && c) // acquire
transition(S_BRANCH, S_TIP, b && m) // prefetch write
transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_TIP_D, b && m) // MMIO write
transition(S_BRANCH, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH_C, S_INVALID, b && c && p)
transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional)
transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write
transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write
transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_TIP, S_INVALID, p)
transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe
transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write
transition(S_TIP, S_TIP_CD, false) // acquire does not make us dirty immediately
transition(S_TIP, S_TRUNK_C, c) // acquire
transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately
transition(S_TIP_C, S_INVALID, c && p)
transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional)
transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write
transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_TIP_C, S_TRUNK_C, c) // acquire
transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty
transition(S_TIP_D, S_INVALID, p)
transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe
transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared
transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead
transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired
transition(S_TIP_D, S_TRUNK_CD, c) // acquire
transition(S_TIP_CD, S_INVALID, c && p)
transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional)
transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire
transition(S_TIP_CD, S_TRUNK_CD, c) // acquire
transition(S_TRUNK_C, S_INVALID, c && p)
transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional)
transition(S_TRUNK_C, S_TIP_C, c) // bounce shared
transition(S_TRUNK_C, S_TIP_D, c) // dirty release
transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared
transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce
transition(S_TRUNK_CD, S_INVALID, c && p)
transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TRUNK_CD, S_TIP_D, c) // dirty release
transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared
transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire
}
// Handle response messages
val probe_bit = params.clientBit(io.sinkc.bits.source)
val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client)
val probe_toN = isToN(io.sinkc.bits.param)
if (!params.firstLevel) when (io.sinkc.valid) {
params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B")
params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B")
// Caution: the probe matches us only in set.
// We would never allow an outer probe to nest until both w_[rp]probeack complete, so
// it is safe to just unguardedly update the probe FSM.
probes_done := probes_done | probe_bit
probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U)
probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT
w_rprobeackfirst := w_rprobeackfirst || last_probe
w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last)
w_pprobeackfirst := w_pprobeackfirst || last_probe
w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last)
// Allow wormhole routing from sinkC if the first request beat has offset 0
val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U)
w_pprobeack := w_pprobeack || set_pprobeack
params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data")
params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data")
// However, meta-data updates need to be done more cautiously
when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!!
}
when (io.sinkd.valid) {
when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) {
sink := io.sinkd.bits.sink
w_grantfirst := true.B
w_grantlast := io.sinkd.bits.last
// Record if we need to prevent taking ownership
bad_grant := io.sinkd.bits.denied
// Allow wormhole routing for requests whose first beat has offset 0
w_grant := request.offset === 0.U || io.sinkd.bits.last
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data")
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data")
gotT := io.sinkd.bits.param === toT
}
.elsewhen (io.sinkd.bits.opcode === ReleaseAck) {
w_releaseack := true.B
}
}
when (io.sinke.valid) {
w_grantack := true.B
}
// Bootstrap new requests
val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits)
val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits)
val new_request = Mux(io.allocate.valid, allocate_as_full, request)
val new_needT = needT(new_request.opcode, new_request.param)
val new_clientBit = params.clientBit(new_request.source)
val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U)
val prior = cacheState(final_meta_writeback, true.B)
def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) {
if (cover) {
params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}")
} else {
assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}")
}
}
when (io.allocate.valid && io.allocate.bits.repeat) {
bypass(S_INVALID, f || p) // Can lose permissions (probe/flush)
bypass(S_BRANCH, b) // MMIO read to read-only device
bypass(S_BRANCH_C, b && c) // you need children to become C
bypass(S_TIP, true) // MMIO read || clean release can lead to this state
bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_D, true) // MMIO write || dirty release lead here
bypass(S_TRUNK_C, c) // acquire for write
bypass(S_TRUNK_CD, c) // dirty release then reacquire
}
when (io.allocate.valid) {
assert (!request_valid || (no_wait && io.schedule.fire))
request_valid := true.B
request := io.allocate.bits
}
// Create execution plan
when (io.directory.valid || (io.allocate.valid && io.allocate.bits.repeat)) {
meta_valid := true.B
meta := new_meta
probes_done := 0.U
probes_toN := 0.U
probes_noT := false.B
gotT := false.B
bad_grant := false.B
// These should already be either true or turning true
// We clear them here explicitly to simplify the mux tree
s_rprobe := true.B
w_rprobeackfirst := true.B
w_rprobeacklast := true.B
s_release := true.B
w_releaseack := true.B
s_pprobe := true.B
s_acquire := true.B
s_flush := true.B
w_grantfirst := true.B
w_grantlast := true.B
w_grant := true.B
w_pprobeackfirst := true.B
w_pprobeacklast := true.B
w_pprobeack := true.B
s_probeack := true.B
s_grantack := true.B
s_execute := true.B
w_grantack := true.B
s_writeback := true.B
// For C channel requests (ie: Release[Data])
when (new_request.prio(2) && (!params.firstLevel).B) {
s_execute := false.B
// Do we need to go dirty?
when (new_request.opcode(0) && !new_meta.dirty) {
s_writeback := false.B
}
// Does our state change?
when (isToB(new_request.param) && new_meta.state === TRUNK) {
s_writeback := false.B
}
// Do our clients change?
when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) {
s_writeback := false.B
}
assert (new_meta.hit)
}
// For X channel requests (ie: flush)
.elsewhen (new_request.control && params.control.B) { // new_request.prio(0)
s_flush := false.B
// Do we need to actually do something?
when (new_meta.hit) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
}
// For A channel requests
.otherwise { // new_request.prio(0) && !new_request.control
s_execute := false.B
// Do we need an eviction?
when (!new_meta.hit && new_meta.state =/= INVALID) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B & (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
// Do we need an acquire?
when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) {
s_acquire := false.B
w_grantfirst := false.B
w_grantlast := false.B
w_grant := false.B
s_grantack := false.B
s_writeback := false.B
}
// Do we need a probe?
when ((!params.firstLevel).B && (new_meta.hit &&
(new_needT || new_meta.state === TRUNK) &&
(new_meta.clients & ~new_skipProbe) =/= 0.U)) {
s_pprobe := false.B
w_pprobeackfirst := false.B
w_pprobeacklast := false.B
w_pprobeack := false.B
s_writeback := false.B
}
// Do we need a grantack?
when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) {
w_grantack := false.B
s_writeback := false.B
}
// Becomes dirty?
when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) {
s_writeback := false.B
}
}
}
} | module MSHR(
input clock,
input reset,
input io_allocate_valid,
input io_allocate_bits_prio_0,
input io_allocate_bits_prio_2,
input io_allocate_bits_control,
input [2:0] io_allocate_bits_opcode,
input [2:0] io_allocate_bits_param,
input [2:0] io_allocate_bits_size,
input [5:0] io_allocate_bits_source,
input [12:0] io_allocate_bits_tag,
input [5:0] io_allocate_bits_offset,
input [5:0] io_allocate_bits_put,
input [9:0] io_allocate_bits_set,
input io_allocate_bits_repeat,
input io_directory_valid,
input io_directory_bits_dirty,
input [1:0] io_directory_bits_state,
input io_directory_bits_clients,
input [12:0] io_directory_bits_tag,
input io_directory_bits_hit,
input [2:0] io_directory_bits_way,
output io_status_valid,
output [9:0] io_status_bits_set,
output [12:0] io_status_bits_tag,
output [2:0] io_status_bits_way,
input io_schedule_ready,
output io_schedule_valid,
output io_schedule_bits_a_valid,
output [12:0] io_schedule_bits_a_bits_tag,
output [9:0] io_schedule_bits_a_bits_set,
output [2:0] io_schedule_bits_a_bits_param,
output io_schedule_bits_a_bits_block,
output io_schedule_bits_c_valid,
output [2:0] io_schedule_bits_c_bits_opcode,
output [2:0] io_schedule_bits_c_bits_param,
output [12:0] io_schedule_bits_c_bits_tag,
output [9:0] io_schedule_bits_c_bits_set,
output [2:0] io_schedule_bits_c_bits_way,
output io_schedule_bits_c_bits_dirty,
output io_schedule_bits_d_valid,
output io_schedule_bits_d_bits_prio_0,
output io_schedule_bits_d_bits_prio_2,
output [2:0] io_schedule_bits_d_bits_opcode,
output [2:0] io_schedule_bits_d_bits_param,
output [2:0] io_schedule_bits_d_bits_size,
output [5:0] io_schedule_bits_d_bits_source,
output [5:0] io_schedule_bits_d_bits_offset,
output [5:0] io_schedule_bits_d_bits_put,
output [9:0] io_schedule_bits_d_bits_set,
output [2:0] io_schedule_bits_d_bits_way,
output io_schedule_bits_d_bits_bad,
output io_schedule_bits_e_valid,
output [2:0] io_schedule_bits_e_bits_sink,
output io_schedule_bits_x_valid,
output io_schedule_bits_dir_valid,
output [9:0] io_schedule_bits_dir_bits_set,
output [2:0] io_schedule_bits_dir_bits_way,
output io_schedule_bits_dir_bits_data_dirty,
output [1:0] io_schedule_bits_dir_bits_data_state,
output io_schedule_bits_dir_bits_data_clients,
output [12:0] io_schedule_bits_dir_bits_data_tag,
output io_schedule_bits_reload,
input io_sinkd_valid,
input io_sinkd_bits_last,
input [2:0] io_sinkd_bits_opcode,
input [2:0] io_sinkd_bits_param,
input [2:0] io_sinkd_bits_sink,
input io_sinkd_bits_denied,
input [9:0] io_nestedwb_set,
input [12:0] io_nestedwb_tag,
input io_nestedwb_b_toN,
input io_nestedwb_b_toB,
input io_nestedwb_b_clr_dirty,
input io_nestedwb_c_set_dirty
);
reg request_valid;
reg request_prio_0;
reg request_prio_2;
reg request_control;
reg [2:0] request_opcode;
reg [2:0] request_param;
reg [2:0] request_size;
reg [5:0] request_source;
reg [12:0] request_tag;
reg [5:0] request_offset;
reg [5:0] request_put;
reg [9:0] request_set;
reg meta_valid;
reg meta_dirty;
reg [1:0] meta_state;
reg evict_c;
reg [12:0] meta_tag;
reg meta_hit;
reg [2:0] meta_way;
reg s_release;
reg w_releaseack;
reg s_acquire;
reg s_flush;
reg w_grantfirst;
reg w_grantlast;
reg w_grant;
reg s_grantack;
reg s_execute;
reg w_grantack;
reg s_writeback;
reg [2:0] sink;
reg gotT;
reg bad_grant;
wire no_wait = w_releaseack & w_grantlast & w_grantack;
wire io_schedule_bits_a_valid_0 = ~s_acquire & s_release;
wire io_schedule_bits_d_valid_0 = ~s_execute & w_grant;
wire io_schedule_bits_e_valid_0 = ~s_grantack & w_grantfirst;
wire io_schedule_bits_x_valid_0 = ~s_flush & w_releaseack;
wire io_schedule_bits_dir_valid_0 = ~s_release | ~s_writeback & no_wait;
wire io_schedule_valid_0 = io_schedule_bits_a_valid_0 | io_schedule_bits_d_valid_0 | io_schedule_bits_e_valid_0 | io_schedule_bits_x_valid_0 | io_schedule_bits_dir_valid_0;
wire _excluded_client_T_1 = request_opcode == 3'h6;
wire req_needT = ~(request_opcode[2]) | request_opcode == 3'h5 & request_param == 3'h1 | (_excluded_client_T_1 | (&request_opcode)) & (|request_param);
wire req_acquire = _excluded_client_T_1 | (&request_opcode);
wire [1:0] _final_meta_writeback_state_T_6 = {1'h1, ~req_acquire};
wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_6 : meta_hit ? ((&meta_state) ? {1'h1, ~(~evict_c & req_acquire)} : {meta_state == 2'h2, 1'h1}) : gotT ? _final_meta_writeback_state_T_6 : 2'h1;
wire final_meta_writeback_dirty = ~bad_grant & (request_control ? ~meta_hit & meta_dirty : meta_hit & meta_dirty | ~(request_opcode[2]));
wire [1:0] _GEN = {1'h0, meta_hit};
wire [1:0] final_meta_writeback_state = bad_grant ? _GEN : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17;
wire after_c = bad_grant ? meta_hit & evict_c : (request_control | meta_hit) & evict_c;
wire _new_meta_T = io_allocate_valid & io_allocate_bits_repeat;
wire _GEN_32 = io_schedule_ready & no_wait;
wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state;
wire new_meta_hit = _new_meta_T ? (bad_grant ? meta_hit : ~request_control) : io_directory_bits_hit;
wire new_request_control = io_allocate_valid ? io_allocate_bits_control : request_control;
wire [2:0] new_request_opcode = io_allocate_valid ? io_allocate_bits_opcode : request_opcode;
wire [2:0] new_request_param = io_allocate_valid ? io_allocate_bits_param : request_param;
wire _new_skipProbe_T = new_request_opcode == 3'h6;
wire _GEN_33 = new_request_control ? ~new_meta_hit : ~(~new_meta_hit & (|new_meta_state));
wire _GEN_34 = ~new_meta_hit | new_meta_state == 2'h1 & (~(new_request_opcode[2]) | new_request_opcode == 3'h5 & new_request_param == 3'h1 | (_new_skipProbe_T | (&new_request_opcode)) & (|new_request_param));
wire _GEN_35 = new_request_control | ~_GEN_34;
wire _GEN_36 = _new_skipProbe_T | (&new_request_opcode);
wire _GEN_37 = meta_valid & (|meta_state) & io_nestedwb_set == request_set & io_nestedwb_tag == meta_tag;
wire _GEN_38 = io_sinkd_bits_opcode == 3'h4 | io_sinkd_bits_opcode == 3'h5;
wire _GEN_39 = io_sinkd_valid & _GEN_38;
wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty;
wire _GEN_40 = io_directory_valid | _new_meta_T;
always @(posedge clock) begin
if (reset) begin
request_valid <= 1'h0;
meta_valid <= 1'h0;
s_release <= 1'h1;
w_releaseack <= 1'h1;
s_acquire <= 1'h1;
s_flush <= 1'h1;
w_grantfirst <= 1'h1;
w_grantlast <= 1'h1;
w_grant <= 1'h1;
s_grantack <= 1'h1;
s_execute <= 1'h1;
w_grantack <= 1'h1;
s_writeback <= 1'h1;
end
else begin
request_valid <= io_allocate_valid | ~_GEN_32 & request_valid;
meta_valid <= _GEN_40 | ~_GEN_32 & meta_valid;
s_release <= _GEN_40 ? _GEN_33 : io_schedule_ready | s_release;
w_releaseack <= _GEN_40 ? _GEN_33 : io_sinkd_valid & ~_GEN_38 & io_sinkd_bits_opcode == 3'h6 | w_releaseack;
s_acquire <= _GEN_40 ? _GEN_35 : io_schedule_ready & s_release | s_acquire;
s_flush <= _GEN_40 ? ~new_request_control : io_schedule_ready & w_releaseack | s_flush;
w_grantfirst <= _GEN_40 ? _GEN_35 : _GEN_39 | w_grantfirst;
if (_GEN_40) begin
w_grantlast <= _GEN_35;
w_grant <= _GEN_35;
w_grantack <= new_request_control | ~_GEN_36;
end
else if (_GEN_39) begin
w_grantlast <= io_sinkd_bits_last;
w_grant <= request_offset == 6'h0 | io_sinkd_bits_last;
end
s_grantack <= _GEN_40 ? _GEN_35 : io_schedule_ready & w_grantfirst | s_grantack;
s_execute <= _GEN_40 ? new_request_control : io_schedule_ready & w_grant | s_execute;
s_writeback <= _GEN_40 ? new_request_control | ~(~(new_request_opcode[2]) & new_meta_hit & ~new_meta_dirty | _GEN_36) & ~_GEN_34 : _GEN_32 | s_writeback;
end
if (io_allocate_valid) begin
request_prio_0 <= io_allocate_bits_prio_0;
request_prio_2 <= io_allocate_bits_prio_2;
request_control <= io_allocate_bits_control;
request_opcode <= io_allocate_bits_opcode;
request_param <= io_allocate_bits_param;
request_size <= io_allocate_bits_size;
request_source <= io_allocate_bits_source;
request_tag <= io_allocate_bits_tag;
request_offset <= io_allocate_bits_offset;
request_put <= io_allocate_bits_put;
request_set <= io_allocate_bits_set;
end
if (_GEN_40) begin
meta_dirty <= new_meta_dirty;
if (_new_meta_T) begin
if (bad_grant)
meta_state <= _GEN;
else begin
if (request_control) begin
if (meta_hit)
meta_state <= 2'h0;
end
else
meta_state <= _final_meta_writeback_state_T_17;
meta_hit <= ~request_control;
end
if (request_control) begin
end
else
meta_tag <= request_tag;
end
else begin
meta_state <= io_directory_bits_state;
meta_tag <= io_directory_bits_tag;
meta_hit <= io_directory_bits_hit;
end
evict_c <= _new_meta_T ? after_c : io_directory_bits_clients;
end
else begin
if (_GEN_37)
meta_dirty <= io_nestedwb_c_set_dirty | ~io_nestedwb_b_clr_dirty & meta_dirty;
if (_GEN_37 & io_nestedwb_b_toB)
meta_state <= 2'h1;
meta_hit <= ~(_GEN_37 & io_nestedwb_b_toN) & meta_hit;
end
if (~_GEN_40 | _new_meta_T) begin
end
else
meta_way <= io_directory_bits_way;
if (_GEN_39)
sink <= io_sinkd_bits_sink;
gotT <= ~_GEN_40 & (_GEN_39 ? io_sinkd_bits_param == 3'h0 : gotT);
bad_grant <= ~_GEN_40 & (_GEN_39 ? io_sinkd_bits_denied : bad_grant);
end
assign io_status_valid = request_valid;
assign io_status_bits_set = request_set;
assign io_status_bits_tag = request_tag;
assign io_status_bits_way = meta_way;
assign io_schedule_valid = io_schedule_valid_0;
assign io_schedule_bits_a_valid = io_schedule_bits_a_valid_0;
assign io_schedule_bits_a_bits_tag = request_tag;
assign io_schedule_bits_a_bits_set = request_set;
assign io_schedule_bits_a_bits_param = {1'h0, req_needT ? (meta_hit ? 2'h2 : 2'h1) : 2'h0};
assign io_schedule_bits_a_bits_block = request_size != 3'h6 | ~(request_opcode == 3'h0 | (&request_opcode));
assign io_schedule_bits_c_valid = ~s_release;
assign io_schedule_bits_c_bits_opcode = {2'h3, meta_dirty};
assign io_schedule_bits_c_bits_param = meta_state == 2'h1 ? 3'h2 : 3'h1;
assign io_schedule_bits_c_bits_tag = meta_tag;
assign io_schedule_bits_c_bits_set = request_set;
assign io_schedule_bits_c_bits_way = meta_way;
assign io_schedule_bits_c_bits_dirty = meta_dirty;
assign io_schedule_bits_d_valid = io_schedule_bits_d_valid_0;
assign io_schedule_bits_d_bits_prio_0 = request_prio_0;
assign io_schedule_bits_d_bits_prio_2 = request_prio_2;
assign io_schedule_bits_d_bits_opcode = request_opcode;
assign io_schedule_bits_d_bits_param = req_acquire ? (request_param == 3'h1 | request_param == 3'h2 ? 3'h1 : request_param == 3'h0 ? {2'h0, req_acquire & (meta_hit ? ~evict_c & (&meta_state) : gotT)} : request_param) : request_param;
assign io_schedule_bits_d_bits_size = request_size;
assign io_schedule_bits_d_bits_source = request_source;
assign io_schedule_bits_d_bits_offset = request_offset;
assign io_schedule_bits_d_bits_put = request_put;
assign io_schedule_bits_d_bits_set = request_set;
assign io_schedule_bits_d_bits_way = meta_way;
assign io_schedule_bits_d_bits_bad = bad_grant;
assign io_schedule_bits_e_valid = io_schedule_bits_e_valid_0;
assign io_schedule_bits_e_bits_sink = sink;
assign io_schedule_bits_x_valid = io_schedule_bits_x_valid_0;
assign io_schedule_bits_dir_valid = io_schedule_bits_dir_valid_0;
assign io_schedule_bits_dir_bits_set = request_set;
assign io_schedule_bits_dir_bits_way = meta_way;
assign io_schedule_bits_dir_bits_data_dirty = s_release & final_meta_writeback_dirty;
assign io_schedule_bits_dir_bits_data_state = s_release ? final_meta_writeback_state : 2'h0;
assign io_schedule_bits_dir_bits_data_clients = s_release & after_c;
assign io_schedule_bits_dir_bits_data_tag = s_release ? (request_control ? meta_tag : request_tag) : 13'h0;
assign io_schedule_bits_reload = no_wait;
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{Decoupled, DecoupledIO}
// A Repeater passes its input to its output, unless repeat is asserted.
// When repeat is asserted, the Repeater copies the input and repeats it next cycle.
class Repeater[T <: Data](gen: T) extends Module
{
override def desiredName = s"Repeater_${gen.typeName}"
val io = IO( new Bundle {
val repeat = Input(Bool())
val full = Output(Bool())
val enq = Flipped(Decoupled(gen.cloneType))
val deq = Decoupled(gen.cloneType)
} )
val full = RegInit(false.B)
val saved = Reg(gen.cloneType)
// When !full, a repeater is pass-through
io.deq.valid := io.enq.valid || full
io.enq.ready := io.deq.ready && !full
io.deq.bits := Mux(full, saved, io.enq.bits)
io.full := full
when (io.enq.fire && io.repeat) { full := true.B; saved := io.enq.bits }
when (io.deq.fire && !io.repeat) { full := false.B }
}
object Repeater
{
// Convenience factory: wraps 'enq' in a Repeater driven by 'repeat' and
// returns the repeater's dequeue side.
def apply[T <: Data](enq: DecoupledIO[T], repeat: Bool): DecoupledIO[T] = {
val repeater = Module(new Repeater(chiselTypeOf(enq.bits)))
repeater.io.repeat := repeat
repeater.io.enq <> enq
repeater.io.deq
}
} | module Repeater_TLBundleA_a28d64s4k1z3u(
input clock,
input reset,
input io_repeat,
output io_full,
output io_enq_ready,
input io_enq_valid,
input [2:0] io_enq_bits_opcode,
input [2:0] io_enq_bits_param,
input [2:0] io_enq_bits_size,
input [3:0] io_enq_bits_source,
input [27:0] io_enq_bits_address,
input [7:0] io_enq_bits_mask,
input io_enq_bits_corrupt,
input io_deq_ready,
output io_deq_valid,
output [2:0] io_deq_bits_opcode,
output [2:0] io_deq_bits_param,
output [2:0] io_deq_bits_size,
output [3:0] io_deq_bits_source,
output [27:0] io_deq_bits_address,
output [7:0] io_deq_bits_mask,
output io_deq_bits_corrupt
);
reg full;
reg [2:0] saved_opcode;
reg [2:0] saved_param;
reg [2:0] saved_size;
reg [3:0] saved_source;
reg [27:0] saved_address;
reg [7:0] saved_mask;
reg saved_corrupt;
wire io_deq_valid_0 = io_enq_valid | full;
wire io_enq_ready_0 = io_deq_ready & ~full;
wire _GEN = io_enq_ready_0 & io_enq_valid & io_repeat;
always @(posedge clock) begin
if (reset)
full <= 1'h0;
else
full <= ~(io_deq_ready & io_deq_valid_0 & ~io_repeat) & (_GEN | full);
if (_GEN) begin
saved_opcode <= io_enq_bits_opcode;
saved_param <= io_enq_bits_param;
saved_size <= io_enq_bits_size;
saved_source <= io_enq_bits_source;
saved_address <= io_enq_bits_address;
saved_mask <= io_enq_bits_mask;
saved_corrupt <= io_enq_bits_corrupt;
end
end
assign io_full = full;
assign io_enq_ready = io_enq_ready_0;
assign io_deq_valid = io_deq_valid_0;
assign io_deq_bits_opcode = full ? saved_opcode : io_enq_bits_opcode;
assign io_deq_bits_param = full ? saved_param : io_enq_bits_param;
assign io_deq_bits_size = full ? saved_size : io_enq_bits_size;
assign io_deq_bits_source = full ? saved_source : io_enq_bits_source;
assign io_deq_bits_address = full ? saved_address : io_enq_bits_address;
assign io_deq_bits_mask = full ? saved_mask : io_enq_bits_mask;
assign io_deq_bits_corrupt = full ? saved_corrupt : io_enq_bits_corrupt;
endmodule |
Generate the Verilog code corresponding to this Chisel code //******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v3.common.{MicroOp}
import boom.v3.exu.{BrUpdateInfo}
/**
* Object to XOR fold a input register of fullLength into a compressedLength.
*/
object Fold
{
/** XOR-fold a fullLength-bit value down to compressedLength bits.
* Successive compressedLength-wide slices of the input are XORed together;
* if the input already fits, it is returned unchanged. */
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
// Fold each clen-wide slice into the accumulator, consuming the input
// from the low end; the final slice may be narrower than clen.
val seed: (UInt, UInt) = (0.U(clen.W), input.asUInt)
val (folded, _) = (0 to hlen - 1 by clen).foldLeft(seed) { case ((acc, rest), i) =>
val len = if (i + clen > hlen) hlen - i else clen
require(len > 0)
(acc(clen - 1, 0) ^ rest(len - 1, 0), rest >> len.U)
}
folded
}
}
}
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
// True when any bit of the uop's branch mask matches a mispredicted branch.
def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool =
maskMatch(brupdate.b1.mispredict_mask, uop.br_mask)
// Variant taking a raw branch mask instead of a full MicroOp.
def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool =
maskMatch(brupdate.b1.mispredict_mask, uop_mask)
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
// Returns a copy of 'uop' whose branch mask has all branches resolved
// this cycle cleared out.
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val updated = WireInit(uop)
updated.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
updated
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
// Branch mask of 'uop' with branches resolved this cycle cleared.
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt =
uop.br_mask & ~brupdate.b1.resolve_mask
// Same computation on a raw branch mask.
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt =
br_mask & ~brupdate.b1.resolve_mask
}
object UpdateBrMask
{
// Copy of 'uop' with its branch mask pruned of branches resolved this cycle.
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
// Same, for any bundle that carries a MicroOp.
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
// Valid-wrapped variant: additionally deasserts 'valid' when the uop was
// squashed by a mispredicted branch.
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
// True when the two masks share at least one set bit.
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
// Clear bit 'idx' of 'msk'; the final slice keeps the original width.
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
// Shift 'new_bit' into the low end of 'reg_val' (the register is assigned here
// as a side effect) and return the register.
// NOTE(review): the Cat result is one bit wider than reg_val, so the assignment
// relies on truncation of the old MSB — confirm this is the intended eviction.
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
// Top bit of the compressed register wraps around into the new low bit.
val carry = csr(clen-1)
// XOR the evicted bit back in at the position where the folded HLEN history wraps.
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
// Power-of-two depth: plain add, keep only the low log2(n) bits.
(value + amt)(log2Ceil(n)-1,0)
} else {
// Widen by one bit so the sum and the compare against n cannot overflow.
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
// Decrement 'value' by the compile-time constant 'amt', wrapping modulo n.
// Precondition (unchanged from the original): amt <= n.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
// Power-of-two depth: subtraction wraps naturally in the low log2(n) bits.
(value - amt.U)(log2Ceil(n)-1,0)
} else {
// Fix: removed two unused zero-extended temporaries ('v' and 'b') that
// were computed but never referenced.
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
// Power-of-two depth: increment wraps for free in the low log2(n) bits.
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
// Power-of-two depth: decrement wraps for free in the low log2(n) bits.
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
// Invert for scenario where pc longer than b
// (which would clear all bits above size(b)).
// NOTE(review): (b-1).U is used as a low-bit mask, which assumes b is a
// power of two — confirm at call sites.
~(~pc | (b-1).U)
}
}
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
// Rotate 'signal' left by one bit: the MSB moves down to bit 0.
def apply(signal: UInt): UInt = {
val width = signal.getWidth
Cat(signal(width - 2, 0), signal(width - 1))
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
// Sign-extend 'x' to exactly 'length' bits (no-op when already that wide).
def apply(x: UInt, length: Int): UInt =
if (x.getWidth == length) x
else Cat(Fill(length - x.getWidth, x(x.getWidth - 1)), x)
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U}
// Expand BOOM's packed immediate into a 32b signed immediate; each bit field
// is placed according to the instruction format selected by 'isel'
// (U-type comes back shifted up 12 bits, per the header comment above).
def apply(ip: UInt, isel: UInt): SInt = {
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt
}
}
/**
* Object to get the FP rounding mode out of a packed immediate.
*/
// The FP rounding mode occupies the low 3 bits of the packed immediate.
object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } }
/**
* Object to get the FP function fype from a packed immediate.
* Note: only works if !(IS_B or IS_S)
*/
// The FP function type occupies bits 9:8 of the packed immediate.
object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } }
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
// Compare the major opcode field directly (JALR opcode is 0b1100111).
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
* for a debug assert (no where else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
// Branch major opcode is 0b1100011; anything else is treated as JAL.
val is_br = (inst(6,0) === "b1100011".U)
// Reassemble the scattered B-type / J-type immediate fields,
// sign-extended, with the low bit forced to zero.
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
// First copy only admits entries at or after 'head'; the appended second
// copy handles wrap-around, so the lowest set bit at/after 'head' wins.
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
// XOR-parity trick for circular-queue age: i0 is older than i1 when an odd
// number of the three comparisons hold.
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
// OR of all right-shifts smears the highest set bit down to bit 0.
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
// OR of all (width-preserving) left-shifts smears the lowest set bit up to the MSB.
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
// Swap the two dimensions of a Vec-of-Vecs: result(c)(r) == in(r)(c).
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val cols = in(0).size
VecInit(Seq.tabulate(cols) { c => VecInit(in.map(row => row(c))) })
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
// Peel off the lowest set bit n times: sels(i) is the one-hot of the
// i-th lowest set bit of 'in' (zero if fewer than i+1 bits are set).
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
// Same width: direct pass-through.
io.out <> io.in
} else {
// counts(i): one-hot (within k) of the output slot input i would occupy,
// built by shifting a '1' left past each earlier valid input.
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
// sels(j)(i): input i is the j-th valid input.
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
// An input is ready when the output slot it maps to is ready.
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v3.common.BoomModule()(p)
with boom.v3.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
// Payload lives in a Mem; uops are kept in registers so each entry's branch
// mask can be updated in place every cycle.
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
// Disambiguates full vs empty when the two pointers coincide.
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
// Wires (not vals) so the flow-queue logic below can override them.
val do_enq = WireInit(io.enq.fire)
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
// Every cycle: kill entries squashed by a mispredicted branch or by a flush,
// and prune resolved branches from each surviving entry's mask.
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
// Enqueue: store payload and uop, with the uop's mask already pruned.
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop)
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
// Dequeue side: combine the stored payload with the live (mask-updated) uop,
// and suppress 'valid' if the head entry is being killed this cycle.
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop))
io.deq.bits := out
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, out.uop)
// For flow queue behavior.
if (flow) {
// When empty, bypass the input straight to the output; if it is taken,
// cancel the enqueue so the beat is not also stored.
when (io.empty) {
io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop)
io.deq.bits := io.enq.bits
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
do_deq := false.B
when (io.deq.ready) { do_enq := false.B }
}
}
// Occupancy count; power-of-two depths get the cheap pointer-difference form.
private val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Render a hardware Bool as an ASCII character for printf debugging.
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char emitted when the bool is true
* @param falseChar Scala Char emitted when the bool is false
* @return UInt ASCII code of the selected character
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
val whenTrue = Str(trueChar)
val whenFalse = Str(falseChar)
Mux(c_bool, whenTrue, whenFalse)
}
}
object CfiTypeToChars
{
/**
* Printable mnemonic for a CFI type, as a Vec of chars.
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val charVecs = strings.map(s => VecInit(s.map(c => Str(c))))
VecInit(charVecs)(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Printable mnemonic for a BPD type, as a Vec of chars.
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val charVecs = strings.map(s => VecInit(s.map(c => Str(c))))
VecInit(charVecs)(bpd_type)
}
}
object RobTypeToChars
{
/**
* Printable mnemonic for a ROB state, as a Vec of chars.
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val charVecs = strings.map(s => VecInit(s.map(c => Str(c))))
VecInit(charVecs)(rob_type)
}
}
object XRegToChars
{
/**
* ABI name of an integer register, as a Vec of chars.
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
// ABI names of the 32 integer registers, in register-number order.
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val charVecs = strings.map(s => VecInit(s.map(c => Str(c))))
VecInit(charVecs)(xreg)
}
}
object FPRegToChars
{
/**
* ABI name of a floating-point register, as a Vec of chars.
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
// ABI names of the 32 FP registers, in register-number order.
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val charVecs = strings.map(s => VecInit(s.map(c => Str(c))))
VecInit(charVecs)(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = s"[C${p(TileKey).tileId}] "
strs.map(line => s"$prefix$line\n").mkString
}
} | module BranchKillableQueue_4(
input clock,
input reset,
output io_enq_ready,
input io_enq_valid,
input [6:0] io_enq_bits_uop_uopc,
input [31:0] io_enq_bits_uop_inst,
input [31:0] io_enq_bits_uop_debug_inst,
input io_enq_bits_uop_is_rvc,
input [39:0] io_enq_bits_uop_debug_pc,
input [2:0] io_enq_bits_uop_iq_type,
input [9:0] io_enq_bits_uop_fu_code,
input [3:0] io_enq_bits_uop_ctrl_br_type,
input [1:0] io_enq_bits_uop_ctrl_op1_sel,
input [2:0] io_enq_bits_uop_ctrl_op2_sel,
input [2:0] io_enq_bits_uop_ctrl_imm_sel,
input [4:0] io_enq_bits_uop_ctrl_op_fcn,
input io_enq_bits_uop_ctrl_fcn_dw,
input [2:0] io_enq_bits_uop_ctrl_csr_cmd,
input io_enq_bits_uop_ctrl_is_load,
input io_enq_bits_uop_ctrl_is_sta,
input io_enq_bits_uop_ctrl_is_std,
input [1:0] io_enq_bits_uop_iw_state,
input io_enq_bits_uop_iw_p1_poisoned,
input io_enq_bits_uop_iw_p2_poisoned,
input io_enq_bits_uop_is_br,
input io_enq_bits_uop_is_jalr,
input io_enq_bits_uop_is_jal,
input io_enq_bits_uop_is_sfb,
input [7:0] io_enq_bits_uop_br_mask,
input [2:0] io_enq_bits_uop_br_tag,
input [3:0] io_enq_bits_uop_ftq_idx,
input io_enq_bits_uop_edge_inst,
input [5:0] io_enq_bits_uop_pc_lob,
input io_enq_bits_uop_taken,
input [19:0] io_enq_bits_uop_imm_packed,
input [11:0] io_enq_bits_uop_csr_addr,
input [4:0] io_enq_bits_uop_rob_idx,
input [2:0] io_enq_bits_uop_ldq_idx,
input [2:0] io_enq_bits_uop_stq_idx,
input [1:0] io_enq_bits_uop_rxq_idx,
input [5:0] io_enq_bits_uop_pdst,
input [5:0] io_enq_bits_uop_prs1,
input [5:0] io_enq_bits_uop_prs2,
input [5:0] io_enq_bits_uop_prs3,
input [3:0] io_enq_bits_uop_ppred,
input io_enq_bits_uop_prs1_busy,
input io_enq_bits_uop_prs2_busy,
input io_enq_bits_uop_prs3_busy,
input io_enq_bits_uop_ppred_busy,
input [5:0] io_enq_bits_uop_stale_pdst,
input io_enq_bits_uop_exception,
input [63:0] io_enq_bits_uop_exc_cause,
input io_enq_bits_uop_bypassable,
input [4:0] io_enq_bits_uop_mem_cmd,
input [1:0] io_enq_bits_uop_mem_size,
input io_enq_bits_uop_mem_signed,
input io_enq_bits_uop_is_fence,
input io_enq_bits_uop_is_fencei,
input io_enq_bits_uop_is_amo,
input io_enq_bits_uop_uses_ldq,
input io_enq_bits_uop_uses_stq,
input io_enq_bits_uop_is_sys_pc2epc,
input io_enq_bits_uop_is_unique,
input io_enq_bits_uop_flush_on_commit,
input io_enq_bits_uop_ldst_is_rs1,
input [5:0] io_enq_bits_uop_ldst,
input [5:0] io_enq_bits_uop_lrs1,
input [5:0] io_enq_bits_uop_lrs2,
input [5:0] io_enq_bits_uop_lrs3,
input io_enq_bits_uop_ldst_val,
input [1:0] io_enq_bits_uop_dst_rtype,
input [1:0] io_enq_bits_uop_lrs1_rtype,
input [1:0] io_enq_bits_uop_lrs2_rtype,
input io_enq_bits_uop_frs3_en,
input io_enq_bits_uop_fp_val,
input io_enq_bits_uop_fp_single,
input io_enq_bits_uop_xcpt_pf_if,
input io_enq_bits_uop_xcpt_ae_if,
input io_enq_bits_uop_xcpt_ma_if,
input io_enq_bits_uop_bp_debug_if,
input io_enq_bits_uop_bp_xcpt_if,
input [1:0] io_enq_bits_uop_debug_fsrc,
input [1:0] io_enq_bits_uop_debug_tsrc,
input [64:0] io_enq_bits_data,
input io_enq_bits_fflags_valid,
input [6:0] io_enq_bits_fflags_bits_uop_uopc,
input [31:0] io_enq_bits_fflags_bits_uop_inst,
input [31:0] io_enq_bits_fflags_bits_uop_debug_inst,
input io_enq_bits_fflags_bits_uop_is_rvc,
input [39:0] io_enq_bits_fflags_bits_uop_debug_pc,
input [2:0] io_enq_bits_fflags_bits_uop_iq_type,
input [9:0] io_enq_bits_fflags_bits_uop_fu_code,
input [3:0] io_enq_bits_fflags_bits_uop_ctrl_br_type,
input [1:0] io_enq_bits_fflags_bits_uop_ctrl_op1_sel,
input [2:0] io_enq_bits_fflags_bits_uop_ctrl_op2_sel,
input [2:0] io_enq_bits_fflags_bits_uop_ctrl_imm_sel,
input [4:0] io_enq_bits_fflags_bits_uop_ctrl_op_fcn,
input io_enq_bits_fflags_bits_uop_ctrl_fcn_dw,
input [2:0] io_enq_bits_fflags_bits_uop_ctrl_csr_cmd,
input io_enq_bits_fflags_bits_uop_ctrl_is_load,
input io_enq_bits_fflags_bits_uop_ctrl_is_sta,
input io_enq_bits_fflags_bits_uop_ctrl_is_std,
input [1:0] io_enq_bits_fflags_bits_uop_iw_state,
input io_enq_bits_fflags_bits_uop_iw_p1_poisoned,
input io_enq_bits_fflags_bits_uop_iw_p2_poisoned,
input io_enq_bits_fflags_bits_uop_is_br,
input io_enq_bits_fflags_bits_uop_is_jalr,
input io_enq_bits_fflags_bits_uop_is_jal,
input io_enq_bits_fflags_bits_uop_is_sfb,
input [7:0] io_enq_bits_fflags_bits_uop_br_mask,
input [2:0] io_enq_bits_fflags_bits_uop_br_tag,
input [3:0] io_enq_bits_fflags_bits_uop_ftq_idx,
input io_enq_bits_fflags_bits_uop_edge_inst,
input [5:0] io_enq_bits_fflags_bits_uop_pc_lob,
input io_enq_bits_fflags_bits_uop_taken,
input [19:0] io_enq_bits_fflags_bits_uop_imm_packed,
input [11:0] io_enq_bits_fflags_bits_uop_csr_addr,
input [4:0] io_enq_bits_fflags_bits_uop_rob_idx,
input [2:0] io_enq_bits_fflags_bits_uop_ldq_idx,
input [2:0] io_enq_bits_fflags_bits_uop_stq_idx,
input [1:0] io_enq_bits_fflags_bits_uop_rxq_idx,
input [5:0] io_enq_bits_fflags_bits_uop_pdst,
input [5:0] io_enq_bits_fflags_bits_uop_prs1,
input [5:0] io_enq_bits_fflags_bits_uop_prs2,
input [5:0] io_enq_bits_fflags_bits_uop_prs3,
input [3:0] io_enq_bits_fflags_bits_uop_ppred,
input io_enq_bits_fflags_bits_uop_prs1_busy,
input io_enq_bits_fflags_bits_uop_prs2_busy,
input io_enq_bits_fflags_bits_uop_prs3_busy,
input io_enq_bits_fflags_bits_uop_ppred_busy,
input [5:0] io_enq_bits_fflags_bits_uop_stale_pdst,
input io_enq_bits_fflags_bits_uop_exception,
input [63:0] io_enq_bits_fflags_bits_uop_exc_cause,
input io_enq_bits_fflags_bits_uop_bypassable,
input [4:0] io_enq_bits_fflags_bits_uop_mem_cmd,
input [1:0] io_enq_bits_fflags_bits_uop_mem_size,
input io_enq_bits_fflags_bits_uop_mem_signed,
input io_enq_bits_fflags_bits_uop_is_fence,
input io_enq_bits_fflags_bits_uop_is_fencei,
input io_enq_bits_fflags_bits_uop_is_amo,
input io_enq_bits_fflags_bits_uop_uses_ldq,
input io_enq_bits_fflags_bits_uop_uses_stq,
input io_enq_bits_fflags_bits_uop_is_sys_pc2epc,
input io_enq_bits_fflags_bits_uop_is_unique,
input io_enq_bits_fflags_bits_uop_flush_on_commit,
input io_enq_bits_fflags_bits_uop_ldst_is_rs1,
input [5:0] io_enq_bits_fflags_bits_uop_ldst,
input [5:0] io_enq_bits_fflags_bits_uop_lrs1,
input [5:0] io_enq_bits_fflags_bits_uop_lrs2,
input [5:0] io_enq_bits_fflags_bits_uop_lrs3,
input io_enq_bits_fflags_bits_uop_ldst_val,
input [1:0] io_enq_bits_fflags_bits_uop_dst_rtype,
input [1:0] io_enq_bits_fflags_bits_uop_lrs1_rtype,
input [1:0] io_enq_bits_fflags_bits_uop_lrs2_rtype,
input io_enq_bits_fflags_bits_uop_frs3_en,
input io_enq_bits_fflags_bits_uop_fp_val,
input io_enq_bits_fflags_bits_uop_fp_single,
input io_enq_bits_fflags_bits_uop_xcpt_pf_if,
input io_enq_bits_fflags_bits_uop_xcpt_ae_if,
input io_enq_bits_fflags_bits_uop_xcpt_ma_if,
input io_enq_bits_fflags_bits_uop_bp_debug_if,
input io_enq_bits_fflags_bits_uop_bp_xcpt_if,
input [1:0] io_enq_bits_fflags_bits_uop_debug_fsrc,
input [1:0] io_enq_bits_fflags_bits_uop_debug_tsrc,
input [4:0] io_enq_bits_fflags_bits_flags,
input io_deq_ready,
output io_deq_valid,
output [6:0] io_deq_bits_uop_uopc,
output [7:0] io_deq_bits_uop_br_mask,
output [4:0] io_deq_bits_uop_rob_idx,
output [2:0] io_deq_bits_uop_stq_idx,
output [5:0] io_deq_bits_uop_pdst,
output io_deq_bits_uop_is_amo,
output io_deq_bits_uop_uses_stq,
output [1:0] io_deq_bits_uop_dst_rtype,
output io_deq_bits_uop_fp_val,
output [64:0] io_deq_bits_data,
output io_deq_bits_predicated,
output io_deq_bits_fflags_valid,
output [4:0] io_deq_bits_fflags_bits_uop_rob_idx,
output [4:0] io_deq_bits_fflags_bits_flags,
input [7:0] io_brupdate_b1_resolve_mask,
input [7:0] io_brupdate_b1_mispredict_mask,
input io_flush,
output io_empty
);
wire [76:0] _ram_ext_R0_data;
reg valids_0;
reg valids_1;
reg valids_2;
reg valids_3;
reg valids_4;
reg valids_5;
reg valids_6;
reg [6:0] uops_0_uopc;
reg [7:0] uops_0_br_mask;
reg [4:0] uops_0_rob_idx;
reg [2:0] uops_0_stq_idx;
reg [5:0] uops_0_pdst;
reg uops_0_is_amo;
reg uops_0_uses_stq;
reg [1:0] uops_0_dst_rtype;
reg uops_0_fp_val;
reg [6:0] uops_1_uopc;
reg [7:0] uops_1_br_mask;
reg [4:0] uops_1_rob_idx;
reg [2:0] uops_1_stq_idx;
reg [5:0] uops_1_pdst;
reg uops_1_is_amo;
reg uops_1_uses_stq;
reg [1:0] uops_1_dst_rtype;
reg uops_1_fp_val;
reg [6:0] uops_2_uopc;
reg [7:0] uops_2_br_mask;
reg [4:0] uops_2_rob_idx;
reg [2:0] uops_2_stq_idx;
reg [5:0] uops_2_pdst;
reg uops_2_is_amo;
reg uops_2_uses_stq;
reg [1:0] uops_2_dst_rtype;
reg uops_2_fp_val;
reg [6:0] uops_3_uopc;
reg [7:0] uops_3_br_mask;
reg [4:0] uops_3_rob_idx;
reg [2:0] uops_3_stq_idx;
reg [5:0] uops_3_pdst;
reg uops_3_is_amo;
reg uops_3_uses_stq;
reg [1:0] uops_3_dst_rtype;
reg uops_3_fp_val;
reg [6:0] uops_4_uopc;
reg [7:0] uops_4_br_mask;
reg [4:0] uops_4_rob_idx;
reg [2:0] uops_4_stq_idx;
reg [5:0] uops_4_pdst;
reg uops_4_is_amo;
reg uops_4_uses_stq;
reg [1:0] uops_4_dst_rtype;
reg uops_4_fp_val;
reg [6:0] uops_5_uopc;
reg [7:0] uops_5_br_mask;
reg [4:0] uops_5_rob_idx;
reg [2:0] uops_5_stq_idx;
reg [5:0] uops_5_pdst;
reg uops_5_is_amo;
reg uops_5_uses_stq;
reg [1:0] uops_5_dst_rtype;
reg uops_5_fp_val;
reg [6:0] uops_6_uopc;
reg [7:0] uops_6_br_mask;
reg [4:0] uops_6_rob_idx;
reg [2:0] uops_6_stq_idx;
reg [5:0] uops_6_pdst;
reg uops_6_is_amo;
reg uops_6_uses_stq;
reg [1:0] uops_6_dst_rtype;
reg uops_6_fp_val;
reg [2:0] enq_ptr_value;
reg [2:0] deq_ptr_value;
reg maybe_full;
wire ptr_match = enq_ptr_value == deq_ptr_value;
wire io_empty_0 = ptr_match & ~maybe_full;
wire full = ptr_match & maybe_full;
wire [7:0] _GEN = {{valids_0}, {valids_6}, {valids_5}, {valids_4}, {valids_3}, {valids_2}, {valids_1}, {valids_0}};
wire _GEN_0 = _GEN[deq_ptr_value];
wire [7:0][6:0] _GEN_1 = {{uops_0_uopc}, {uops_6_uopc}, {uops_5_uopc}, {uops_4_uopc}, {uops_3_uopc}, {uops_2_uopc}, {uops_1_uopc}, {uops_0_uopc}};
wire [7:0][7:0] _GEN_2 = {{uops_0_br_mask}, {uops_6_br_mask}, {uops_5_br_mask}, {uops_4_br_mask}, {uops_3_br_mask}, {uops_2_br_mask}, {uops_1_br_mask}, {uops_0_br_mask}};
wire [7:0] out_uop_br_mask = _GEN_2[deq_ptr_value];
wire [7:0][4:0] _GEN_3 = {{uops_0_rob_idx}, {uops_6_rob_idx}, {uops_5_rob_idx}, {uops_4_rob_idx}, {uops_3_rob_idx}, {uops_2_rob_idx}, {uops_1_rob_idx}, {uops_0_rob_idx}};
wire [7:0][2:0] _GEN_4 = {{uops_0_stq_idx}, {uops_6_stq_idx}, {uops_5_stq_idx}, {uops_4_stq_idx}, {uops_3_stq_idx}, {uops_2_stq_idx}, {uops_1_stq_idx}, {uops_0_stq_idx}};
wire [7:0][5:0] _GEN_5 = {{uops_0_pdst}, {uops_6_pdst}, {uops_5_pdst}, {uops_4_pdst}, {uops_3_pdst}, {uops_2_pdst}, {uops_1_pdst}, {uops_0_pdst}};
wire [7:0] _GEN_6 = {{uops_0_is_amo}, {uops_6_is_amo}, {uops_5_is_amo}, {uops_4_is_amo}, {uops_3_is_amo}, {uops_2_is_amo}, {uops_1_is_amo}, {uops_0_is_amo}};
wire [7:0] _GEN_7 = {{uops_0_uses_stq}, {uops_6_uses_stq}, {uops_5_uses_stq}, {uops_4_uses_stq}, {uops_3_uses_stq}, {uops_2_uses_stq}, {uops_1_uses_stq}, {uops_0_uses_stq}};
wire [7:0][1:0] _GEN_8 = {{uops_0_dst_rtype}, {uops_6_dst_rtype}, {uops_5_dst_rtype}, {uops_4_dst_rtype}, {uops_3_dst_rtype}, {uops_2_dst_rtype}, {uops_1_dst_rtype}, {uops_0_dst_rtype}};
wire [7:0] _GEN_9 = {{uops_0_fp_val}, {uops_6_fp_val}, {uops_5_fp_val}, {uops_4_fp_val}, {uops_3_fp_val}, {uops_2_fp_val}, {uops_1_fp_val}, {uops_0_fp_val}};
wire do_deq = ~io_empty_0 & (io_deq_ready | ~_GEN_0) & ~io_empty_0;
wire do_enq = ~(io_empty_0 & io_deq_ready) & ~full & io_enq_valid;
wire _GEN_10 = enq_ptr_value == 3'h0;
wire _GEN_11 = do_enq & _GEN_10;
wire _GEN_12 = enq_ptr_value == 3'h1;
wire _GEN_13 = do_enq & _GEN_12;
wire _GEN_14 = enq_ptr_value == 3'h2;
wire _GEN_15 = do_enq & _GEN_14;
wire _GEN_16 = enq_ptr_value == 3'h3;
wire _GEN_17 = do_enq & _GEN_16;
wire _GEN_18 = enq_ptr_value == 3'h4;
wire _GEN_19 = do_enq & _GEN_18;
wire _GEN_20 = enq_ptr_value == 3'h5;
wire _GEN_21 = do_enq & _GEN_20;
wire _GEN_22 = enq_ptr_value == 3'h6;
wire _GEN_23 = do_enq & _GEN_22;
wire [7:0] _uops_br_mask_T_1 = io_enq_bits_uop_br_mask & ~io_brupdate_b1_resolve_mask;
always @(posedge clock) begin
if (reset) begin
valids_0 <= 1'h0;
valids_1 <= 1'h0;
valids_2 <= 1'h0;
valids_3 <= 1'h0;
valids_4 <= 1'h0;
valids_5 <= 1'h0;
valids_6 <= 1'h0;
enq_ptr_value <= 3'h0;
deq_ptr_value <= 3'h0;
maybe_full <= 1'h0;
end
else begin
valids_0 <= ~(do_deq & deq_ptr_value == 3'h0) & (_GEN_11 | valids_0 & (io_brupdate_b1_mispredict_mask & uops_0_br_mask) == 8'h0 & ~io_flush);
valids_1 <= ~(do_deq & deq_ptr_value == 3'h1) & (_GEN_13 | valids_1 & (io_brupdate_b1_mispredict_mask & uops_1_br_mask) == 8'h0 & ~io_flush);
valids_2 <= ~(do_deq & deq_ptr_value == 3'h2) & (_GEN_15 | valids_2 & (io_brupdate_b1_mispredict_mask & uops_2_br_mask) == 8'h0 & ~io_flush);
valids_3 <= ~(do_deq & deq_ptr_value == 3'h3) & (_GEN_17 | valids_3 & (io_brupdate_b1_mispredict_mask & uops_3_br_mask) == 8'h0 & ~io_flush);
valids_4 <= ~(do_deq & deq_ptr_value == 3'h4) & (_GEN_19 | valids_4 & (io_brupdate_b1_mispredict_mask & uops_4_br_mask) == 8'h0 & ~io_flush);
valids_5 <= ~(do_deq & deq_ptr_value == 3'h5) & (_GEN_21 | valids_5 & (io_brupdate_b1_mispredict_mask & uops_5_br_mask) == 8'h0 & ~io_flush);
valids_6 <= ~(do_deq & deq_ptr_value == 3'h6) & (_GEN_23 | valids_6 & (io_brupdate_b1_mispredict_mask & uops_6_br_mask) == 8'h0 & ~io_flush);
if (do_enq)
enq_ptr_value <= enq_ptr_value == 3'h6 ? 3'h0 : enq_ptr_value + 3'h1;
if (do_deq)
deq_ptr_value <= deq_ptr_value == 3'h6 ? 3'h0 : deq_ptr_value + 3'h1;
if (~(do_enq == do_deq))
maybe_full <= do_enq;
end
if (_GEN_11) begin
uops_0_uopc <= io_enq_bits_uop_uopc;
uops_0_rob_idx <= io_enq_bits_uop_rob_idx;
uops_0_stq_idx <= io_enq_bits_uop_stq_idx;
uops_0_pdst <= io_enq_bits_uop_pdst;
uops_0_is_amo <= io_enq_bits_uop_is_amo;
uops_0_uses_stq <= io_enq_bits_uop_uses_stq;
uops_0_dst_rtype <= io_enq_bits_uop_dst_rtype;
uops_0_fp_val <= io_enq_bits_uop_fp_val;
end
uops_0_br_mask <= do_enq & _GEN_10 ? _uops_br_mask_T_1 : ({8{~valids_0}} | ~io_brupdate_b1_resolve_mask) & uops_0_br_mask;
if (_GEN_13) begin
uops_1_uopc <= io_enq_bits_uop_uopc;
uops_1_rob_idx <= io_enq_bits_uop_rob_idx;
uops_1_stq_idx <= io_enq_bits_uop_stq_idx;
uops_1_pdst <= io_enq_bits_uop_pdst;
uops_1_is_amo <= io_enq_bits_uop_is_amo;
uops_1_uses_stq <= io_enq_bits_uop_uses_stq;
uops_1_dst_rtype <= io_enq_bits_uop_dst_rtype;
uops_1_fp_val <= io_enq_bits_uop_fp_val;
end
uops_1_br_mask <= do_enq & _GEN_12 ? _uops_br_mask_T_1 : ({8{~valids_1}} | ~io_brupdate_b1_resolve_mask) & uops_1_br_mask;
if (_GEN_15) begin
uops_2_uopc <= io_enq_bits_uop_uopc;
uops_2_rob_idx <= io_enq_bits_uop_rob_idx;
uops_2_stq_idx <= io_enq_bits_uop_stq_idx;
uops_2_pdst <= io_enq_bits_uop_pdst;
uops_2_is_amo <= io_enq_bits_uop_is_amo;
uops_2_uses_stq <= io_enq_bits_uop_uses_stq;
uops_2_dst_rtype <= io_enq_bits_uop_dst_rtype;
uops_2_fp_val <= io_enq_bits_uop_fp_val;
end
uops_2_br_mask <= do_enq & _GEN_14 ? _uops_br_mask_T_1 : ({8{~valids_2}} | ~io_brupdate_b1_resolve_mask) & uops_2_br_mask;
if (_GEN_17) begin
uops_3_uopc <= io_enq_bits_uop_uopc;
uops_3_rob_idx <= io_enq_bits_uop_rob_idx;
uops_3_stq_idx <= io_enq_bits_uop_stq_idx;
uops_3_pdst <= io_enq_bits_uop_pdst;
uops_3_is_amo <= io_enq_bits_uop_is_amo;
uops_3_uses_stq <= io_enq_bits_uop_uses_stq;
uops_3_dst_rtype <= io_enq_bits_uop_dst_rtype;
uops_3_fp_val <= io_enq_bits_uop_fp_val;
end
uops_3_br_mask <= do_enq & _GEN_16 ? _uops_br_mask_T_1 : ({8{~valids_3}} | ~io_brupdate_b1_resolve_mask) & uops_3_br_mask;
if (_GEN_19) begin
uops_4_uopc <= io_enq_bits_uop_uopc;
uops_4_rob_idx <= io_enq_bits_uop_rob_idx;
uops_4_stq_idx <= io_enq_bits_uop_stq_idx;
uops_4_pdst <= io_enq_bits_uop_pdst;
uops_4_is_amo <= io_enq_bits_uop_is_amo;
uops_4_uses_stq <= io_enq_bits_uop_uses_stq;
uops_4_dst_rtype <= io_enq_bits_uop_dst_rtype;
uops_4_fp_val <= io_enq_bits_uop_fp_val;
end
uops_4_br_mask <= do_enq & _GEN_18 ? _uops_br_mask_T_1 : ({8{~valids_4}} | ~io_brupdate_b1_resolve_mask) & uops_4_br_mask;
if (_GEN_21) begin
uops_5_uopc <= io_enq_bits_uop_uopc;
uops_5_rob_idx <= io_enq_bits_uop_rob_idx;
uops_5_stq_idx <= io_enq_bits_uop_stq_idx;
uops_5_pdst <= io_enq_bits_uop_pdst;
uops_5_is_amo <= io_enq_bits_uop_is_amo;
uops_5_uses_stq <= io_enq_bits_uop_uses_stq;
uops_5_dst_rtype <= io_enq_bits_uop_dst_rtype;
uops_5_fp_val <= io_enq_bits_uop_fp_val;
end
uops_5_br_mask <= do_enq & _GEN_20 ? _uops_br_mask_T_1 : ({8{~valids_5}} | ~io_brupdate_b1_resolve_mask) & uops_5_br_mask;
if (_GEN_23) begin
uops_6_uopc <= io_enq_bits_uop_uopc;
uops_6_rob_idx <= io_enq_bits_uop_rob_idx;
uops_6_stq_idx <= io_enq_bits_uop_stq_idx;
uops_6_pdst <= io_enq_bits_uop_pdst;
uops_6_is_amo <= io_enq_bits_uop_is_amo;
uops_6_uses_stq <= io_enq_bits_uop_uses_stq;
uops_6_dst_rtype <= io_enq_bits_uop_dst_rtype;
uops_6_fp_val <= io_enq_bits_uop_fp_val;
end
uops_6_br_mask <= do_enq & _GEN_22 ? _uops_br_mask_T_1 : ({8{~valids_6}} | ~io_brupdate_b1_resolve_mask) & uops_6_br_mask;
end
ram_7x77 ram_ext (
.R0_addr (deq_ptr_value),
.R0_en (1'h1),
.R0_clk (clock),
.R0_data (_ram_ext_R0_data),
.W0_addr (enq_ptr_value),
.W0_en (do_enq),
.W0_clk (clock),
.W0_data ({io_enq_bits_fflags_bits_flags, io_enq_bits_fflags_bits_uop_rob_idx, io_enq_bits_fflags_valid, 1'h0, io_enq_bits_data})
);
assign io_enq_ready = ~full;
assign io_deq_valid = io_empty_0 ? io_enq_valid : ~io_empty_0 & _GEN_0 & (io_brupdate_b1_mispredict_mask & out_uop_br_mask) == 8'h0 & ~io_flush;
assign io_deq_bits_uop_uopc = io_empty_0 ? io_enq_bits_uop_uopc : _GEN_1[deq_ptr_value];
assign io_deq_bits_uop_br_mask = io_empty_0 ? io_enq_bits_uop_br_mask & ~io_brupdate_b1_resolve_mask : out_uop_br_mask & ~io_brupdate_b1_resolve_mask;
assign io_deq_bits_uop_rob_idx = io_empty_0 ? io_enq_bits_uop_rob_idx : _GEN_3[deq_ptr_value];
assign io_deq_bits_uop_stq_idx = io_empty_0 ? io_enq_bits_uop_stq_idx : _GEN_4[deq_ptr_value];
assign io_deq_bits_uop_pdst = io_empty_0 ? io_enq_bits_uop_pdst : _GEN_5[deq_ptr_value];
assign io_deq_bits_uop_is_amo = io_empty_0 ? io_enq_bits_uop_is_amo : _GEN_6[deq_ptr_value];
assign io_deq_bits_uop_uses_stq = io_empty_0 ? io_enq_bits_uop_uses_stq : _GEN_7[deq_ptr_value];
assign io_deq_bits_uop_dst_rtype = io_empty_0 ? io_enq_bits_uop_dst_rtype : _GEN_8[deq_ptr_value];
assign io_deq_bits_uop_fp_val = io_empty_0 ? io_enq_bits_uop_fp_val : _GEN_9[deq_ptr_value];
assign io_deq_bits_data = io_empty_0 ? io_enq_bits_data : _ram_ext_R0_data[64:0];
assign io_deq_bits_predicated = ~io_empty_0 & _ram_ext_R0_data[65];
assign io_deq_bits_fflags_valid = io_empty_0 ? io_enq_bits_fflags_valid : _ram_ext_R0_data[66];
assign io_deq_bits_fflags_bits_uop_rob_idx = io_empty_0 ? io_enq_bits_fflags_bits_uop_rob_idx : _ram_ext_R0_data[71:67];
assign io_deq_bits_fflags_bits_flags = io_empty_0 ? io_enq_bits_fflags_bits_flags : _ram_ext_R0_data[76:72];
assign io_empty = io_empty_0;
endmodule |
Generate the Verilog code corresponding to this Chisel code /*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.util._
case class ListBufferParameters[T <: Data](gen: T, queues: Int, entries: Int, bypass: Boolean)
{
val queueBits = log2Up(queues)
val entryBits = log2Up(entries)
}
class ListBufferPush[T <: Data](params: ListBufferParameters[T]) extends Bundle
{
val index = UInt(params.queueBits.W)
val data = Output(params.gen)
}
class ListBuffer[T <: Data](params: ListBufferParameters[T]) extends Module
{
override def desiredName = s"ListBuffer_${params.gen.typeName}_q${params.queues}_e${params.entries}"
val io = IO(new Bundle {
// push is visible on the same cycle; flow queues
val push = Flipped(Decoupled(new ListBufferPush(params)))
val valid = UInt(params.queues.W)
val pop = Flipped(Valid(UInt(params.queueBits.W)))
val data = Output(params.gen)
})
val valid = RegInit(0.U(params.queues.W))
val head = Mem(params.queues, UInt(params.entryBits.W))
val tail = Mem(params.queues, UInt(params.entryBits.W))
val used = RegInit(0.U(params.entries.W))
val next = Mem(params.entries, UInt(params.entryBits.W))
val data = Mem(params.entries, params.gen)
val freeOH = ~(leftOR(~used) << 1) & ~used
val freeIdx = OHToUInt(freeOH)
val valid_set = WireDefault(0.U(params.queues.W))
val valid_clr = WireDefault(0.U(params.queues.W))
val used_set = WireDefault(0.U(params.entries.W))
val used_clr = WireDefault(0.U(params.entries.W))
val push_tail = tail.read(io.push.bits.index)
val push_valid = valid(io.push.bits.index)
io.push.ready := !used.andR
when (io.push.fire) {
valid_set := UIntToOH(io.push.bits.index, params.queues)
used_set := freeOH
data.write(freeIdx, io.push.bits.data)
when (push_valid) {
next.write(push_tail, freeIdx)
} .otherwise {
head.write(io.push.bits.index, freeIdx)
}
tail.write(io.push.bits.index, freeIdx)
}
val pop_head = head.read(io.pop.bits)
val pop_valid = valid(io.pop.bits)
// Bypass push data to the peek port
io.data := (if (!params.bypass) data.read(pop_head) else Mux(!pop_valid, io.push.bits.data, data.read(pop_head)))
io.valid := (if (!params.bypass) valid else (valid | valid_set))
// It is an error to pop something that is not valid
assert (!io.pop.fire || (io.valid)(io.pop.bits))
when (io.pop.fire) {
used_clr := UIntToOH(pop_head, params.entries)
when (pop_head === tail.read(io.pop.bits)) {
valid_clr := UIntToOH(io.pop.bits, params.queues)
}
head.write(io.pop.bits, Mux(io.push.fire && push_valid && push_tail === pop_head, freeIdx, next.read(pop_head)))
}
// Empty bypass changes no state
when ((!params.bypass).B || !io.pop.valid || pop_valid) {
used := (used & ~used_clr) | used_set
valid := (valid & ~valid_clr) | valid_set
}
} | module tail_15x6(
input [3:0] R0_addr,
input R0_en,
input R0_clk,
output [5:0] R0_data,
input [3:0] R1_addr,
input R1_en,
input R1_clk,
output [5:0] R1_data,
input [3:0] W0_addr,
input W0_en,
input W0_clk,
input [5:0] W0_data
);
reg [5:0] Memory[0:14];
always @(posedge W0_clk) begin
if (W0_en & 1'h1)
Memory[W0_addr] <= W0_data;
end
assign R0_data = R0_en ? Memory[R0_addr] : 6'bx;
assign R1_data = R1_en ? Memory[R1_addr] : 6'bx;
endmodule |
Generate the Verilog code corresponding to this Chisel code /*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink._
class SourceERequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val sink = UInt(params.outer.bundle.sinkBits.W)
}
class SourceE(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val req = Flipped(Decoupled(new SourceERequest(params)))
val e = Decoupled(new TLBundleE(params.outer.bundle))
})
// ready must be a register, because we derive valid from ready
require (!params.micro.outerBuf.e.pipe && params.micro.outerBuf.e.isDefined)
val e = Wire(chiselTypeOf(io.e))
io.e <> params.micro.outerBuf.e(e)
io.req.ready := e.ready
e.valid := io.req.valid
e.bits.sink := io.req.bits.sink
// we can't cover valid+!ready, because no backpressure on E is common
} | module SourceE(
input clock,
input reset,
output io_req_ready,
input io_req_valid,
input [2:0] io_req_bits_sink,
output io_e_valid,
output [2:0] io_e_bits_sink
);
Queue2_TLBundleE_a32d64s2k3z3c io_e_q (
.clock (clock),
.reset (reset),
.io_enq_ready (io_req_ready),
.io_enq_valid (io_req_valid),
.io_enq_bits_sink (io_req_bits_sink),
.io_deq_valid (io_e_valid),
.io_deq_bits_sink (io_e_bits_sink)
);
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{Decoupled, DecoupledIO}
// A Repeater passes its input to its output, unless repeat is asserted.
// When repeat is asserted, the Repeater copies the input and repeats it next cycle.
class Repeater[T <: Data](gen: T) extends Module
{
override def desiredName = s"Repeater_${gen.typeName}"
val io = IO( new Bundle {
val repeat = Input(Bool())
val full = Output(Bool())
val enq = Flipped(Decoupled(gen.cloneType))
val deq = Decoupled(gen.cloneType)
} )
val full = RegInit(false.B)
val saved = Reg(gen.cloneType)
// When !full, a repeater is pass-through
io.deq.valid := io.enq.valid || full
io.enq.ready := io.deq.ready && !full
io.deq.bits := Mux(full, saved, io.enq.bits)
io.full := full
when (io.enq.fire && io.repeat) { full := true.B; saved := io.enq.bits }
when (io.deq.fire && !io.repeat) { full := false.B }
}
object Repeater
{
def apply[T <: Data](enq: DecoupledIO[T], repeat: Bool): DecoupledIO[T] = {
val repeater = Module(new Repeater(chiselTypeOf(enq.bits)))
repeater.io.repeat := repeat
repeater.io.enq <> enq
repeater.io.deq
}
} | module Repeater_TLBundleA_a26d64s7k1z3u(
input clock,
input reset,
input io_repeat,
output io_full,
output io_enq_ready,
input io_enq_valid,
input [2:0] io_enq_bits_opcode,
input [2:0] io_enq_bits_param,
input [2:0] io_enq_bits_size,
input [6:0] io_enq_bits_source,
input [25:0] io_enq_bits_address,
input [7:0] io_enq_bits_mask,
input io_enq_bits_corrupt,
input io_deq_ready,
output io_deq_valid,
output [2:0] io_deq_bits_opcode,
output [2:0] io_deq_bits_param,
output [2:0] io_deq_bits_size,
output [6:0] io_deq_bits_source,
output [25:0] io_deq_bits_address,
output [7:0] io_deq_bits_mask,
output io_deq_bits_corrupt
);
reg full;
reg [2:0] saved_opcode;
reg [2:0] saved_param;
reg [2:0] saved_size;
reg [6:0] saved_source;
reg [25:0] saved_address;
reg [7:0] saved_mask;
reg saved_corrupt;
wire io_deq_valid_0 = io_enq_valid | full;
wire io_enq_ready_0 = io_deq_ready & ~full;
wire _GEN = io_enq_ready_0 & io_enq_valid & io_repeat;
always @(posedge clock) begin
if (reset)
full <= 1'h0;
else
full <= ~(io_deq_ready & io_deq_valid_0 & ~io_repeat) & (_GEN | full);
if (_GEN) begin
saved_opcode <= io_enq_bits_opcode;
saved_param <= io_enq_bits_param;
saved_size <= io_enq_bits_size;
saved_source <= io_enq_bits_source;
saved_address <= io_enq_bits_address;
saved_mask <= io_enq_bits_mask;
saved_corrupt <= io_enq_bits_corrupt;
end
end
assign io_full = full;
assign io_enq_ready = io_enq_ready_0;
assign io_deq_valid = io_deq_valid_0;
assign io_deq_bits_opcode = full ? saved_opcode : io_enq_bits_opcode;
assign io_deq_bits_param = full ? saved_param : io_enq_bits_param;
assign io_deq_bits_size = full ? saved_size : io_enq_bits_size;
assign io_deq_bits_source = full ? saved_source : io_enq_bits_source;
assign io_deq_bits_address = full ? saved_address : io_enq_bits_address;
assign io_deq_bits_mask = full ? saved_mask : io_enq_bits_mask;
assign io_deq_bits_corrupt = full ? saved_corrupt : io_enq_bits_corrupt;
endmodule |
Generate the Verilog code corresponding to this Chisel code package testchipip.serdes
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config._
class GenericSerializer[T <: Data](t: T, flitWidth: Int) extends Module {
override def desiredName = s"GenericSerializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(t))
val out = Decoupled(new Flit(flitWidth))
val busy = Output(Bool())
})
val dataBits = t.getWidth.max(flitWidth)
val dataBeats = (dataBits - 1) / flitWidth + 1
require(dataBeats >= 1)
val data = Reg(Vec(dataBeats, UInt(flitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready && beat === 0.U
io.out.valid := io.in.valid || beat =/= 0.U
io.out.bits.flit := Mux(beat === 0.U, io.in.bits.asUInt, data(beat))
when (io.out.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) {
data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(flitWidth.W)))
data(0) := DontCare // unused, DCE this
}
}
io.busy := io.out.valid
}
class GenericDeserializer[T <: Data](t: T, flitWidth: Int) extends Module {
override def desiredName = s"GenericDeserializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(t)
val busy = Output(Bool())
})
val dataBits = t.getWidth.max(flitWidth)
val dataBeats = (dataBits - 1) / flitWidth + 1
require(dataBeats >= 1)
val data = Reg(Vec(dataBeats-1, UInt(flitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
io.out.valid := io.in.valid && beat === (dataBeats-1).U
io.out.bits := (if (dataBeats == 1) {
io.in.bits.flit.asTypeOf(t)
} else {
Cat(io.in.bits.flit, data.asUInt).asTypeOf(t)
})
when (io.in.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
if (dataBeats > 1) {
when (beat =/= (dataBeats-1).U) {
data(beat(log2Ceil(dataBeats-1)-1,0)) := io.in.bits.flit
}
}
}
io.busy := beat =/= 0.U
}
class FlitToPhit(flitWidth: Int, phitWidth: Int) extends Module {
override def desiredName = s"FlitToPhit_f${flitWidth}_p${phitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Phit(phitWidth))
})
require(flitWidth >= phitWidth)
val dataBeats = (flitWidth - 1) / phitWidth + 1
val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready && beat === 0.U
io.out.valid := io.in.valid || beat =/= 0.U
io.out.bits.phit := (if (dataBeats == 1) io.in.bits.flit else Mux(beat === 0.U, io.in.bits.flit, data(beat-1.U)))
when (io.out.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) {
data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(phitWidth.W))).tail
}
}
}
object FlitToPhit {
def apply(flit: DecoupledIO[Flit], phitWidth: Int): DecoupledIO[Phit] = {
val flit2phit = Module(new FlitToPhit(flit.bits.flitWidth, phitWidth))
flit2phit.io.in <> flit
flit2phit.io.out
}
}
class PhitToFlit(flitWidth: Int, phitWidth: Int) extends Module {
override def desiredName = s"PhitToFlit_p${phitWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Phit(phitWidth)))
val out = Decoupled(new Flit(flitWidth))
})
require(flitWidth >= phitWidth)
val dataBeats = (flitWidth - 1) / phitWidth + 1
val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
io.out.valid := io.in.valid && beat === (dataBeats-1).U
io.out.bits.flit := (if (dataBeats == 1) io.in.bits.phit else Cat(io.in.bits.phit, data.asUInt))
when (io.in.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
if (dataBeats > 1) {
when (beat =/= (dataBeats-1).U) {
data(beat) := io.in.bits.phit
}
}
}
}
object PhitToFlit {
def apply(phit: DecoupledIO[Phit], flitWidth: Int): DecoupledIO[Flit] = {
val phit2flit = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
phit2flit.io.in <> phit
phit2flit.io.out
}
def apply(phit: ValidIO[Phit], flitWidth: Int): ValidIO[Flit] = {
val phit2flit = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
phit2flit.io.in.valid := phit.valid
phit2flit.io.in.bits := phit.bits
when (phit.valid) { assert(phit2flit.io.in.ready) }
val out = Wire(Valid(new Flit(flitWidth)))
out.valid := phit2flit.io.out.valid
out.bits := phit2flit.io.out.bits
phit2flit.io.out.ready := true.B
out
}
}
class PhitArbiter(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
override def desiredName = s"PhitArbiter_p${phitWidth}_f${flitWidth}_n${channels}"
val io = IO(new Bundle {
val in = Flipped(Vec(channels, Decoupled(new Phit(phitWidth))))
val out = Decoupled(new Phit(phitWidth))
})
if (channels == 1) {
io.out <> io.in(0)
} else {
val headerWidth = log2Ceil(channels)
val headerBeats = (headerWidth - 1) / phitWidth + 1
val flitBeats = (flitWidth - 1) / phitWidth + 1
val beats = headerBeats + flitBeats
val beat = RegInit(0.U(log2Ceil(beats).W))
val chosen_reg = Reg(UInt(headerWidth.W))
val chosen_prio = PriorityEncoder(io.in.map(_.valid))
val chosen = Mux(beat === 0.U, chosen_prio, chosen_reg)
val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
io.out.valid := VecInit(io.in.map(_.valid))(chosen)
io.out.bits.phit := Mux(beat < headerBeats.U,
chosen.asTypeOf(Vec(headerBeats, UInt(phitWidth.W)))(header_idx),
VecInit(io.in.map(_.bits.phit))(chosen))
for (i <- 0 until channels) {
io.in(i).ready := io.out.ready && beat >= headerBeats.U && chosen_reg === i.U
}
when (io.out.fire) {
beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) { chosen_reg := chosen_prio }
}
}
}
class PhitDemux(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
override def desiredName = s"PhitDemux_p${phitWidth}_f${flitWidth}_n${channels}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Phit(phitWidth)))
val out = Vec(channels, Decoupled(new Phit(phitWidth)))
})
if (channels == 1) {
io.out(0) <> io.in
} else {
val headerWidth = log2Ceil(channels)
val headerBeats = (headerWidth - 1) / phitWidth + 1
val flitBeats = (flitWidth - 1) / phitWidth + 1
val beats = headerBeats + flitBeats
val beat = RegInit(0.U(log2Ceil(beats).W))
val channel_vec = Reg(Vec(headerBeats, UInt(phitWidth.W)))
val channel = channel_vec.asUInt(log2Ceil(channels)-1,0)
val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
io.in.ready := beat < headerBeats.U || VecInit(io.out.map(_.ready))(channel)
for (c <- 0 until channels) {
io.out(c).valid := io.in.valid && beat >= headerBeats.U && channel === c.U
io.out(c).bits.phit := io.in.bits.phit
}
when (io.in.fire) {
beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
when (beat < headerBeats.U) {
channel_vec(header_idx) := io.in.bits.phit
}
}
}
}
class DecoupledFlitToCreditedFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"DecoupledFlitToCreditedFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Flipped(Decoupled(new Flit(flitWidth)))
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val credits = RegInit(0.U((creditWidth+1).W))
val credit_incr = io.out.fire
val credit_decr = io.credit.fire
when (credit_incr || credit_decr) {
credits := credits + credit_incr - Mux(io.credit.valid, io.credit.bits.flit +& 1.U, 0.U)
}
io.out.valid := io.in.valid && credits < bufferSz.U
io.out.bits.flit := io.in.bits.flit
io.in.ready := io.out.ready && credits < bufferSz.U
io.credit.ready := true.B
}
class CreditedFlitToDecoupledFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"CreditedFlitToDecoupledFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Decoupled(new Flit(flitWidth))
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val buffer = Module(new Queue(new Flit(flitWidth), bufferSz))
val credits = RegInit(0.U((creditWidth+1).W))
val credit_incr = buffer.io.deq.fire
val credit_decr = io.credit.fire
when (credit_incr || credit_decr) {
credits := credit_incr + Mux(credit_decr, 0.U, credits)
}
buffer.io.enq.valid := io.in.valid
buffer.io.enq.bits := io.in.bits
io.in.ready := true.B
when (io.in.valid) { assert(buffer.io.enq.ready) }
io.out <> buffer.io.deq
io.credit.valid := credits =/= 0.U
io.credit.bits.flit := credits - 1.U
} | module PhitArbiter_p32_f32_n5(
input clock,
input reset,
output io_in_0_ready,
input io_in_0_valid,
input [31:0] io_in_0_bits_phit,
output io_in_1_ready,
input io_in_1_valid,
input [31:0] io_in_1_bits_phit,
output io_in_2_ready,
input io_in_2_valid,
input [31:0] io_in_2_bits_phit,
output io_in_3_ready,
input io_in_3_valid,
input [31:0] io_in_3_bits_phit,
output io_in_4_ready,
input io_in_4_valid,
input [31:0] io_in_4_bits_phit,
input io_out_ready,
output io_out_valid,
output [31:0] io_out_bits_phit
);
reg beat;
reg [2:0] chosen_reg;
wire [2:0] chosen_prio = io_in_0_valid ? 3'h0 : io_in_1_valid ? 3'h1 : io_in_2_valid ? 3'h2 : io_in_3_valid ? 3'h3 : 3'h4;
wire [2:0] chosen = beat ? chosen_reg : chosen_prio;
wire [7:0] _GEN = {{io_in_0_valid}, {io_in_0_valid}, {io_in_0_valid}, {io_in_4_valid}, {io_in_3_valid}, {io_in_2_valid}, {io_in_1_valid}, {io_in_0_valid}};
wire [7:0][31:0] _GEN_0 = {{io_in_0_bits_phit}, {io_in_0_bits_phit}, {io_in_0_bits_phit}, {io_in_4_bits_phit}, {io_in_3_bits_phit}, {io_in_2_bits_phit}, {io_in_1_bits_phit}, {io_in_0_bits_phit}};
wire _GEN_1 = io_out_ready & _GEN[chosen];
always @(posedge clock) begin
if (reset)
beat <= 1'h0;
else if (_GEN_1)
beat <= ~beat & beat - 1'h1;
if (_GEN_1 & ~beat)
chosen_reg <= chosen_prio;
end
assign io_in_0_ready = io_out_ready & beat & chosen_reg == 3'h0;
assign io_in_1_ready = io_out_ready & beat & chosen_reg == 3'h1;
assign io_in_2_ready = io_out_ready & beat & chosen_reg == 3'h2;
assign io_in_3_ready = io_out_ready & beat & chosen_reg == 3'h3;
assign io_in_4_ready = io_out_ready & beat & chosen_reg == 3'h4;
assign io_out_valid = _GEN[chosen];
assign io_out_bits_phit = beat ? _GEN_0[chosen] : {29'h0, chosen};
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.{Cat}
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.tile.{CoreBundle, HasCoreParameters}
import freechips.rocketchip.util._
class BPControl(implicit p: Parameters) extends CoreBundle()(p) {
val ttype = UInt(4.W)
val dmode = Bool()
val maskmax = UInt(6.W)
val reserved = UInt((xLen - (if (coreParams.useBPWatch) 26 else 24)).W)
val action = UInt((if (coreParams.useBPWatch) 3 else 1).W)
val chain = Bool()
val zero = UInt(2.W)
val tmatch = UInt(2.W)
val m = Bool()
val h = Bool()
val s = Bool()
val u = Bool()
val x = Bool()
val w = Bool()
val r = Bool()
def tType = 2
def maskMax = 4
def enabled(mstatus: MStatus) = !mstatus.debug && Cat(m, h, s, u)(mstatus.prv)
}
class TExtra(implicit p: Parameters) extends CoreBundle()(p) {
def mvalueBits: Int = if (xLen == 32) coreParams.mcontextWidth min 6 else coreParams.mcontextWidth min 13
def svalueBits: Int = if (xLen == 32) coreParams.scontextWidth min 16 else coreParams.scontextWidth min 34
def mselectPos: Int = if (xLen == 32) 25 else 50
def mvaluePos : Int = mselectPos + 1
def sselectPos: Int = 0
def svaluePos : Int = 2
val mvalue = UInt(mvalueBits.W)
val mselect = Bool()
val pad2 = UInt((mselectPos - svalueBits - 2).W)
val svalue = UInt(svalueBits.W)
val pad1 = UInt(1.W)
val sselect = Bool()
}
class BP(implicit p: Parameters) extends CoreBundle()(p) {
val control = new BPControl
val address = UInt(vaddrBits.W)
val textra = new TExtra
def contextMatch(mcontext: UInt, scontext: UInt) =
(if (coreParams.mcontextWidth > 0) (!textra.mselect || (mcontext(textra.mvalueBits-1,0) === textra.mvalue)) else true.B) &&
(if (coreParams.scontextWidth > 0) (!textra.sselect || (scontext(textra.svalueBits-1,0) === textra.svalue)) else true.B)
def mask(dummy: Int = 0) =
(0 until control.maskMax-1).scanLeft(control.tmatch(0))((m, i) => m && address(i)).asUInt
def pow2AddressMatch(x: UInt) =
(~x | mask()) === (~address | mask())
def rangeAddressMatch(x: UInt) =
(x >= address) ^ control.tmatch(0)
def addressMatch(x: UInt) =
Mux(control.tmatch(1), rangeAddressMatch(x), pow2AddressMatch(x))
}
class BPWatch (val n: Int) extends Bundle() {
val valid = Vec(n, Bool())
val rvalid = Vec(n, Bool())
val wvalid = Vec(n, Bool())
val ivalid = Vec(n, Bool())
val action = UInt(3.W)
}
class BreakpointUnit(n: Int)(implicit val p: Parameters) extends Module with HasCoreParameters {
val io = IO(new Bundle {
val status = Input(new MStatus())
val bp = Input(Vec(n, new BP))
val pc = Input(UInt(vaddrBits.W))
val ea = Input(UInt(vaddrBits.W))
val mcontext = Input(UInt(coreParams.mcontextWidth.W))
val scontext = Input(UInt(coreParams.scontextWidth.W))
val xcpt_if = Output(Bool())
val xcpt_ld = Output(Bool())
val xcpt_st = Output(Bool())
val debug_if = Output(Bool())
val debug_ld = Output(Bool())
val debug_st = Output(Bool())
val bpwatch = Output(Vec(n, new BPWatch(1)))
})
io.xcpt_if := false.B
io.xcpt_ld := false.B
io.xcpt_st := false.B
io.debug_if := false.B
io.debug_ld := false.B
io.debug_st := false.B
(io.bpwatch zip io.bp).foldLeft((true.B, true.B, true.B)) { case ((ri, wi, xi), (bpw, bp)) =>
val en = bp.control.enabled(io.status)
val cx = bp.contextMatch(io.mcontext, io.scontext)
val r = en && bp.control.r && bp.addressMatch(io.ea) && cx
val w = en && bp.control.w && bp.addressMatch(io.ea) && cx
val x = en && bp.control.x && bp.addressMatch(io.pc) && cx
val end = !bp.control.chain
val action = bp.control.action
bpw.action := action
bpw.valid(0) := false.B
bpw.rvalid(0) := false.B
bpw.wvalid(0) := false.B
bpw.ivalid(0) := false.B
when (end && r && ri) { io.xcpt_ld := (action === 0.U); io.debug_ld := (action === 1.U); bpw.valid(0) := true.B; bpw.rvalid(0) := true.B }
when (end && w && wi) { io.xcpt_st := (action === 0.U); io.debug_st := (action === 1.U); bpw.valid(0) := true.B; bpw.wvalid(0) := true.B }
when (end && x && xi) { io.xcpt_if := (action === 0.U); io.debug_if := (action === 1.U); bpw.valid(0) := true.B; bpw.ivalid(0) := true.B }
(end || r, end || w, end || x)
}
} | module BreakpointUnit(
input io_status_debug,
input [1:0] io_status_prv,
input io_bp_0_control_action,
input [1:0] io_bp_0_control_tmatch,
input io_bp_0_control_m,
input io_bp_0_control_s,
input io_bp_0_control_u,
input io_bp_0_control_x,
input io_bp_0_control_w,
input io_bp_0_control_r,
input [38:0] io_bp_0_address,
input [38:0] io_pc,
input [38:0] io_ea,
output io_xcpt_if,
output io_xcpt_ld,
output io_xcpt_st,
output io_debug_if,
output io_debug_ld,
output io_debug_st
);
wire [3:0] _en_T_2 = {io_bp_0_control_m, 1'h0, io_bp_0_control_s, io_bp_0_control_u} >> io_status_prv;
wire en = ~io_status_debug & _en_T_2[0];
wire _w_T_2 = io_ea >= io_bp_0_address;
wire [38:0] _w_T_5 = ~io_ea;
wire _r_T_8 = io_bp_0_control_tmatch[0] & io_bp_0_address[0];
wire _r_T_10 = _r_T_8 & io_bp_0_address[1];
wire [38:0] _x_T_15 = ~io_bp_0_address;
wire _r_T_18 = io_bp_0_control_tmatch[0] & io_bp_0_address[0];
wire _r_T_20 = _r_T_18 & io_bp_0_address[1];
wire r = en & io_bp_0_control_r & (io_bp_0_control_tmatch[1] ? _w_T_2 ^ io_bp_0_control_tmatch[0] : {_w_T_5[38:4], _w_T_5[3:0] | {_r_T_10 & io_bp_0_address[2], _r_T_10, _r_T_8, io_bp_0_control_tmatch[0]}} == {_x_T_15[38:4], _x_T_15[3:0] | {_r_T_20 & io_bp_0_address[2], _r_T_20, _r_T_18, io_bp_0_control_tmatch[0]}});
wire _w_T_8 = io_bp_0_control_tmatch[0] & io_bp_0_address[0];
wire _w_T_10 = _w_T_8 & io_bp_0_address[1];
wire _w_T_18 = io_bp_0_control_tmatch[0] & io_bp_0_address[0];
wire _w_T_20 = _w_T_18 & io_bp_0_address[1];
wire w = en & io_bp_0_control_w & (io_bp_0_control_tmatch[1] ? _w_T_2 ^ io_bp_0_control_tmatch[0] : {_w_T_5[38:4], _w_T_5[3:0] | {_w_T_10 & io_bp_0_address[2], _w_T_10, _w_T_8, io_bp_0_control_tmatch[0]}} == {_x_T_15[38:4], _x_T_15[3:0] | {_w_T_20 & io_bp_0_address[2], _w_T_20, _w_T_18, io_bp_0_control_tmatch[0]}});
wire [38:0] _x_T_5 = ~io_pc;
wire _x_T_8 = io_bp_0_control_tmatch[0] & io_bp_0_address[0];
wire _x_T_10 = _x_T_8 & io_bp_0_address[1];
wire _x_T_18 = io_bp_0_control_tmatch[0] & io_bp_0_address[0];
wire _x_T_20 = _x_T_18 & io_bp_0_address[1];
wire x = en & io_bp_0_control_x & (io_bp_0_control_tmatch[1] ? io_pc >= io_bp_0_address ^ io_bp_0_control_tmatch[0] : {_x_T_5[38:4], _x_T_5[3:0] | {_x_T_10 & io_bp_0_address[2], _x_T_10, _x_T_8, io_bp_0_control_tmatch[0]}} == {_x_T_15[38:4], _x_T_15[3:0] | {_x_T_20 & io_bp_0_address[2], _x_T_20, _x_T_18, io_bp_0_control_tmatch[0]}});
assign io_xcpt_if = x & ~io_bp_0_control_action;
assign io_xcpt_ld = r & ~io_bp_0_control_action;
assign io_xcpt_st = w & ~io_bp_0_control_action;
assign io_debug_if = x & io_bp_0_control_action;
assign io_debug_ld = r & io_bp_0_control_action;
assign io_debug_st = w & io_bp_0_control_action;
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{Decoupled, DecoupledIO}
// A Repeater passes its input to its output, unless repeat is asserted.
// When repeat is asserted, the Repeater copies the input and repeats it next cycle.
class Repeater[T <: Data](gen: T) extends Module
{
  // Name the generated module after the payload type (e.g. Repeater_TLBundleA_...).
  override def desiredName = s"Repeater_${gen.typeName}"

  val io = IO( new Bundle {
    val repeat = Input(Bool())   // asserted with an accepted enq beat: replay it next cycle
    val full = Output(Bool())    // a saved copy is currently being replayed
    val enq = Flipped(Decoupled(gen.cloneType))
    val deq = Decoupled(gen.cloneType)
  } )

  val full = RegInit(false.B)    // set while the saved beat is live
  val saved = Reg(gen.cloneType) // copy of the last accepted input beat

  // When !full, a repeater is pass-through
  io.deq.valid := io.enq.valid || full
  io.enq.ready := io.deq.ready && !full
  io.deq.bits := Mux(full, saved, io.enq.bits)
  io.full := full

  // Capture on a repeated enqueue; release when a beat leaves without repeat.
  when (io.enq.fire && io.repeat) { full := true.B; saved := io.enq.bits }
  when (io.deq.fire && !io.repeat) { full := false.B }
}
object Repeater
{
def apply[T <: Data](enq: DecoupledIO[T], repeat: Bool): DecoupledIO[T] = {
val repeater = Module(new Repeater(chiselTypeOf(enq.bits)))
repeater.io.repeat := repeat
repeater.io.enq <> enq
repeater.io.deq
}
} | module Repeater_TLBundleA_a21d64s7k1z3u(
  // One TileLink A-channel beat in (enq) and out (deq), plus repeat control.
  input         clock,
  input         reset,
  input         io_repeat,
  output        io_full,
  output        io_enq_ready,
  input         io_enq_valid,
  input  [2:0]  io_enq_bits_opcode,
  input  [2:0]  io_enq_bits_param,
  input  [2:0]  io_enq_bits_size,
  input  [6:0]  io_enq_bits_source,
  input  [20:0] io_enq_bits_address,
  input  [7:0]  io_enq_bits_mask,
  input         io_enq_bits_corrupt,
  input         io_deq_ready,
  output        io_deq_valid,
  output [2:0]  io_deq_bits_opcode,
  output [2:0]  io_deq_bits_param,
  output [2:0]  io_deq_bits_size,
  output [6:0]  io_deq_bits_source,
  output [20:0] io_deq_bits_address,
  output [7:0]  io_deq_bits_mask,
  output        io_deq_bits_corrupt
);

  reg        full;          // high while the saved beat is replayed
  // Saved copy of the last accepted input beat, one register per field.
  reg [2:0]  saved_opcode;
  reg [2:0]  saved_param;
  reg [2:0]  saved_size;
  reg [6:0]  saved_source;
  reg [20:0] saved_address;
  reg [7:0]  saved_mask;
  reg        saved_corrupt;

  // Pass-through when empty; block new enqueues while replaying.
  wire io_deq_valid_0 = io_enq_valid | full;
  wire io_enq_ready_0 = io_deq_ready & ~full;
  // Capture condition: an accepted enqueue that is marked for repeat.
  wire _GEN = io_enq_ready_0 & io_enq_valid & io_repeat;

  always @(posedge clock) begin
    if (reset)
      full <= 1'h0;
    else
      // Set on a repeated enqueue; cleared when a beat leaves without repeat.
      full <= ~(io_deq_ready & io_deq_valid_0 & ~io_repeat) & (_GEN | full);
    if (_GEN) begin
      saved_opcode <= io_enq_bits_opcode;
      saved_param <= io_enq_bits_param;
      saved_size <= io_enq_bits_size;
      saved_source <= io_enq_bits_source;
      saved_address <= io_enq_bits_address;
      saved_mask <= io_enq_bits_mask;
      saved_corrupt <= io_enq_bits_corrupt;
    end
  end

  assign io_full = full;
  assign io_enq_ready = io_enq_ready_0;
  assign io_deq_valid = io_deq_valid_0;
  // While replaying, present the saved beat; otherwise forward the input.
  assign io_deq_bits_opcode = full ? saved_opcode : io_enq_bits_opcode;
  assign io_deq_bits_param = full ? saved_param : io_enq_bits_param;
  assign io_deq_bits_size = full ? saved_size : io_enq_bits_size;
  assign io_deq_bits_source = full ? saved_source : io_enq_bits_source;
  assign io_deq_bits_address = full ? saved_address : io_enq_bits_address;
  assign io_deq_bits_mask = full ? saved_mask : io_enq_bits_mask;
  assign io_deq_bits_corrupt = full ? saved_corrupt : io_enq_bits_corrupt;
endmodule
Generate the Verilog code corresponding to this Chisel code // See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{Decoupled, DecoupledIO}
// A Repeater passes its input to its output, unless repeat is asserted.
// When repeat is asserted, the Repeater copies the input and repeats it next cycle.
class Repeater[T <: Data](gen: T) extends Module
{
  // Generated module name carries the payload type for readable hierarchies.
  override def desiredName = s"Repeater_${gen.typeName}"

  val io = IO( new Bundle {
    val repeat = Input(Bool())   // request that the current enq beat be replayed
    val full = Output(Bool())    // true while a replayed copy occupies the stage
    val enq = Flipped(Decoupled(gen.cloneType))
    val deq = Decoupled(gen.cloneType)
  } )

  val full = RegInit(false.B)    // occupancy flag for the saved beat
  val saved = Reg(gen.cloneType) // snapshot of the beat to be repeated

  // When !full, a repeater is pass-through
  io.deq.valid := io.enq.valid || full
  io.enq.ready := io.deq.ready && !full
  io.deq.bits := Mux(full, saved, io.enq.bits)
  io.full := full

  // Save the beat when it fires with repeat set; drop it once sent un-repeated.
  when (io.enq.fire && io.repeat) { full := true.B; saved := io.enq.bits }
  when (io.deq.fire && !io.repeat) { full := false.B }
}
object Repeater
{
def apply[T <: Data](enq: DecoupledIO[T], repeat: Bool): DecoupledIO[T] = {
val repeater = Module(new Repeater(chiselTypeOf(enq.bits)))
repeater.io.repeat := repeat
repeater.io.enq <> enq
repeater.io.deq
}
} | module Repeater_TLBundleD_a32d64s1k3z4u(
  // One TileLink D-channel beat in (enq) and out (deq), plus repeat control.
  // NOTE(review): unlike the A-channel variant, no io_full output is present —
  // presumably pruned because unused by the instantiating design; confirm.
  input         clock,
  input         reset,
  input         io_repeat,
  output        io_enq_ready,
  input         io_enq_valid,
  input  [2:0]  io_enq_bits_opcode,
  input  [1:0]  io_enq_bits_param,
  input  [3:0]  io_enq_bits_size,
  input  [2:0]  io_enq_bits_sink,
  input         io_enq_bits_denied,
  input  [63:0] io_enq_bits_data,
  input         io_enq_bits_corrupt,
  input         io_deq_ready,
  output        io_deq_valid,
  output [2:0]  io_deq_bits_opcode,
  output [1:0]  io_deq_bits_param,
  output [3:0]  io_deq_bits_size,
  output [2:0]  io_deq_bits_sink,
  output        io_deq_bits_denied,
  output [63:0] io_deq_bits_data,
  output        io_deq_bits_corrupt
);

  reg        full;          // high while the saved beat is replayed
  // Saved copy of the last accepted input beat, one register per field.
  reg [2:0]  saved_opcode;
  reg [1:0]  saved_param;
  reg [3:0]  saved_size;
  reg [2:0]  saved_sink;
  reg        saved_denied;
  reg [63:0] saved_data;
  reg        saved_corrupt;

  // Pass-through when empty; block new enqueues while replaying.
  wire io_deq_valid_0 = io_enq_valid | full;
  wire io_enq_ready_0 = io_deq_ready & ~full;
  // Capture condition: an accepted enqueue that is marked for repeat.
  wire _GEN = io_enq_ready_0 & io_enq_valid & io_repeat;

  always @(posedge clock) begin
    if (reset)
      full <= 1'h0;
    else
      // Set on a repeated enqueue; cleared when a beat leaves without repeat.
      full <= ~(io_deq_ready & io_deq_valid_0 & ~io_repeat) & (_GEN | full);
    if (_GEN) begin
      saved_opcode <= io_enq_bits_opcode;
      saved_param <= io_enq_bits_param;
      saved_size <= io_enq_bits_size;
      saved_sink <= io_enq_bits_sink;
      saved_denied <= io_enq_bits_denied;
      saved_data <= io_enq_bits_data;
      saved_corrupt <= io_enq_bits_corrupt;
    end
  end

  assign io_enq_ready = io_enq_ready_0;
  assign io_deq_valid = io_deq_valid_0;
  // While replaying, present the saved beat; otherwise forward the input.
  assign io_deq_bits_opcode = full ? saved_opcode : io_enq_bits_opcode;
  assign io_deq_bits_param = full ? saved_param : io_enq_bits_param;
  assign io_deq_bits_size = full ? saved_size : io_enq_bits_size;
  assign io_deq_bits_sink = full ? saved_sink : io_enq_bits_sink;
  assign io_deq_bits_denied = full ? saved_denied : io_enq_bits_denied;
  assign io_deq_bits_data = full ? saved_data : io_enq_bits_data;
  assign io_deq_bits_corrupt = full ? saved_corrupt : io_enq_bits_corrupt;
endmodule
Generate the Verilog code corresponding to this Chisel code package sifive.blocks.devices.uart
import chisel3._
import chisel3.util._
import freechips.rocketchip.util._
/** UARTRx module receives serial input from the Rx port and transmits it to the Rx fifo in parallel
*
* ==Datapass==
* Port(Rx) -> sample -> shifter -> Rx fifo -> TL bus
*
* ==Structure==
* - baud rate divisor counter:
* generate pulse, the enable signal for sample and data shift
* - sample counter:
* sample happens in middle
* - data counter
* control signals for data shift process
* - sample and data shift logic
*
* ==State Machine==
* s_idle: detect start bit, init data_count and sample count, start pulse counter
 * s_data: data receiving
*
* @note Rx fifo transmits Rx data to TL bus
*/
class UARTRx(c: UARTParams) extends Module {
  val io = IO(new Bundle {
    /** enable signal from top */
    val en = Input(Bool())
    /** input data from rx port */
    val in = Input(UInt(1.W))
    /** output data to Rx fifo */
    val out = Valid(UInt(c.dataBits.W))
    /** divisor bits */
    val div = Input(UInt(c.divisorBits.W))
    /** parity enable */
    val enparity = c.includeParity.option(Input(Bool()))
    /** parity select
      *
      * 0 -> even parity
      * 1 -> odd parity
      */
    val parity = c.includeParity.option(Input(Bool()))
    /** parity error bit */
    val errorparity = c.includeParity.option(Output(Bool()))
    /** databit select
      *
      * true -> 8
      * false -> 9
      */
    val data8or9 = (c.dataBits == 9).option(Input(Bool()))
  })

  // Default: no parity error; overridden below at the parity sampling point.
  if (c.includeParity)
    io.errorparity.get := false.B

  // Require the line to stay low for several cycles before accepting a start bit.
  val debounce = RegInit(0.U(2.W))
  val debounce_max = (debounce === 3.U)
  val debounce_min = (debounce === 0.U)

  val prescaler = Reg(UInt((c.divisorBits - c.oversample + 1).W))
  val start = WireDefault(false.B)
  /** enable signal for sampling and data shifting */
  val pulse = (prescaler === 0.U)

  private val dataCountBits = log2Floor(c.dataBits+c.includeParity.toInt) + 1
  /** init = data bits(8 or 9) + parity bit(0 or 1) + start bit(1) */
  val data_count = Reg(UInt(dataCountBits.W))
  val data_last = (data_count === 0.U)
  val parity_bit = (data_count === 1.U) && io.enparity.getOrElse(false.B)
  val sample_count = Reg(UInt(c.oversample.W))
  val sample_mid = (sample_count === ((c.oversampleFactor - c.nSamples + 1) >> 1).U)
  // todo unused
  val sample_last = (sample_count === 0.U)
  /** counter for data and sample
    *
    * {{{
    * | data_count | sample_count |
    * }}}
    */
  val countdown = Cat(data_count, sample_count) - 1.U

  // Compensate for the divisor not being a multiple of the oversampling period.
  // Let remainder k = (io.div % c.oversampleFactor).
  // For the last k samples, extend the sampling delay by 1 cycle.
  val remainder = io.div(c.oversample-1, 0)
  val extend = (sample_count < remainder) // Pad head: (sample_count > ~remainder)
  /** prescaler reset signal
    *
    * conditions:
    * {{{
    * start : transmission starts
    * pulse : returns true every pulse counter period
    * }}}
    */
  val restore = start || pulse
  val prescaler_in = Mux(restore, io.div >> c.oversample, prescaler)
  val prescaler_next = prescaler_in - Mux(restore && extend, 0.U, 1.U)

  /** buffer for sample results */
  val sample = Reg(UInt(c.nSamples.W))
  // take the majority bit of sample buffer
  val voter = Majority(sample.asBools.toSet)
  // data buffer
  val shifter = Reg(UInt(c.dataBits.W))

  // valid is a one-cycle pulse raised when a full frame has been received.
  val valid = RegInit(false.B)
  valid := false.B
  io.out.valid := valid
  io.out.bits := (if (c.dataBits == 8) shifter else Mux(io.data8or9.get, Cat(0.U, shifter(8,1)), shifter))

  val (s_idle :: s_data :: Nil) = Enum(2)
  val state = RegInit(s_idle)

  switch (state) {
    is (s_idle) {
      // todo !(!io.in)?
      when (!(!io.in) && !debounce_min) {
        debounce := debounce - 1.U
      }
      when (!io.in) {
        debounce := debounce + 1.U
        when (debounce_max) {
          state := s_data
          start := true.B
          prescaler := prescaler_next
          // init data_count
          data_count := (c.dataBits+1).U + (if (c.includeParity) io.enparity.get else 0.U) - io.data8or9.getOrElse(false.B).asUInt
          // init sample_count = 15
          sample_count := (c.oversampleFactor - 1).U
        }
      }
    }

    is (s_data) {
      prescaler := prescaler_next
      when (pulse) {
        // sample scan in
        sample := Cat(sample, io.in)
        data_count := countdown >> c.oversample
        sample_count := countdown(c.oversample-1, 0)

        when (sample_mid) {
          if (c.includeParity) {
            // act according to frame bit stage at its respective sampling point
            // check parity bit for error
            when (parity_bit) {
              io.errorparity.get := (shifter.asBools.reduce(_ ^ _) ^ voter ^ io.parity.get)
            }
            when (data_last) {
              state := s_idle
              valid := true.B
            } .elsewhen (!parity_bit) {
              // do not add parity bit to final rx data
              shifter := Cat(voter, shifter >> 1)
            }
          } else {
            when (data_last) {
              state := s_idle
              valid := true.B
            } .otherwise {
              shifter := Cat(voter, shifter >> 1)
            }
          }
        }
      }
    }
  }

  // Receiver disabled: hold the debounce counter in reset.
  when (!io.en) {
    debounce := 0.U
  }
}
/*
Copyright 2016 SiFive, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/ | module UARTRx(
  input         clock,
  input         reset,
  input         io_en,
  input         io_in,          // serial Rx line
  output        io_out_valid,   // one-cycle pulse per received byte
  output [7:0]  io_out_bits,
  input  [15:0] io_div          // baud-rate divisor
);

  reg [1:0]  debounce;     // start-bit debounce counter (saturates at 3)
  reg [12:0] prescaler;    // per-sample divider, reloaded from io_div[15:4]
  reg [3:0]  data_count;   // remaining frame bits (init 9 = 8 data + start)
  reg [3:0]  sample_count; // oversample phase within the current bit (15..0)
  reg [2:0]  sample;       // last three line samples, majority-voted below
  reg [7:0]  shifter;      // received byte, shifted in LSB first
  reg        valid;        // registered output-valid pulse
  reg        state;        // 0 = idle (hunt start bit), 1 = receiving data

  wire [7:0] _countdown_T_1 = {data_count, sample_count} - 8'h1; // fused countdown
  wire pulse = prescaler == 13'h0;        // per-sample tick
  wire data_last = data_count == 4'h0;
  wire sample_mid = sample_count == 4'h7; // mid-bit sampling point
  wire _GEN = ~io_in & (&debounce);       // debounced start-bit detect
  wire _GEN_0 = _GEN | state;             // prescaler runs from start detect on
  wire _GEN_1 = state & pulse;
  wire _GEN_2 = state & pulse & sample_mid;
  // Reload prescaler on start or on every sample tick; the io_div[3:0]
  // remainder stretches the first samples by one cycle each (see Chisel source).
  wire restore = ~state & ~io_in & (&debounce) | pulse;

  always @(posedge clock) begin
    if (reset) begin
      debounce <= 2'h0;
      valid <= 1'h0;
      state <= 1'h0;
    end
    else begin
      if (io_en) begin
        if (state) begin
        end
        else if (io_in) begin
          if (io_in & (|debounce))
            debounce <= debounce - 2'h1;
        end
        else
          debounce <= debounce + 2'h1;
      end
      else
        debounce <= 2'h0;
      valid <= _GEN_2 & data_last;
      if (state)
        state <= ~(state & pulse & sample_mid & data_last) & state;
      else
        state <= _GEN_0;
    end
    if (_GEN_0)
      prescaler <= (restore ? {1'h0, io_div[15:4]} : prescaler) - {12'h0, ~(restore & sample_count < io_div[3:0])};
    if (state) begin
      if (_GEN_1) begin
        data_count <= _countdown_T_1[7:4];
        sample_count <= _countdown_T_1[3:0];
      end
    end
    else if (_GEN) begin
      // Frame start: 9 bits to count down, 16 oversample phases per bit.
      data_count <= 4'h9;
      sample_count <= 4'hF;
    end
    if (_GEN_1)
      sample <= {sample[1:0], io_in};
    if (~state | ~_GEN_2 | data_last) begin
    end
    else
      // Majority vote of the three samples, shifted into the MSB (LSB-first Rx).
      shifter <= {sample[0] & sample[1] | sample[0] & sample[2] | sample[1] & sample[2], shifter[7:1]};
  end

  assign io_out_valid = valid;
  assign io_out_bits = shifter;
endmodule
Generate the Verilog code corresponding to this Chisel code // See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
object DescribedSRAM {
/** Create a [[SyncReadMem]] carrying SRAM description metadata.
  *
  * @param name instance name suggested for the memory
  * @param desc human-readable description recorded in the annotation
  * @param size memory depth (number of entries)
  * @param data element type stored per entry
  * @return the annotated SyncReadMem
  */
def apply[T <: Data](
  name: String,
  desc: String,
  size: BigInt, // depth
  data: T
): SyncReadMem[T] = {
  val mem = SyncReadMem(size, data)
  mem.suggestName(name)

  // Write-mask granularity: per-element width for Vec data, full width otherwise.
  val granWidth = data match {
    case v: Vec[_] => v.head.getWidth
    case d => d.getWidth
  }

  // (Removed unused local `val uid = 0` — it was never referenced.)
  Annotated.srams(
    component = mem,
    name = name,
    address_width = log2Ceil(size),
    data_width = data.getWidth,
    depth = size,
    description = desc,
    write_mask_granularity = granWidth
  )

  mem
}
} | module array_1_0_0(
  input  [8:0]  R0_addr, // read port: address / enable / clock / data out
  input         R0_en,
  input         R0_clk,
  output [63:0] R0_data,
  input  [8:0]  W0_addr, // write port: address / enable / clock / data in
  input         W0_en,
  input         W0_clk,
  input  [63:0] W0_data
);

  // Thin wrapper: every port is forwarded unchanged to the shared external
  // memory macro (array_0_0_0_ext), which holds the actual 512x64 storage.
  array_0_0_0_ext array_0_0_0_ext (
    .R0_addr (R0_addr),
    .R0_en   (R0_en),
    .R0_clk  (R0_clk),
    .R0_data (R0_data),
    .W0_addr (W0_addr),
    .W0_en   (W0_en),
    .W0_clk  (W0_clk),
    .W0_data (W0_data)
  );
endmodule
Generate the Verilog code corresponding to this Chisel code // See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
/** String enrichments used for generating instance/signal names. */
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
  /** converts from camel case to to underscores, also removing all spaces.
    * Fix: use drop(1) instead of tail so "".underscore returns "" rather than
    * throwing UnsupportedOperationException on the empty string.
    */
  def underscore: String = x.drop(1).foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
    case (acc, c) if c.isUpper => acc + "_" + c.toLower
    case (acc, c) if c == ' ' => acc
    case (acc, c) => acc + c
  }

  /** converts spaces or underscores to hyphens, also lowering case */
  def kebab: String = x.toLowerCase map {
    case ' ' => '-'
    case '_' => '-'
    case c => c
  }

  /** Append "_named_<name>", or "_with_no_name" when absent. */
  def named(name: Option[String]): String = {
    x + name.map("_named_" + _ ).getOrElse("_with_no_name")
  }

  def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
  // LinkedHashMap iterates in insertion order, so keys come out in the order
  // they were first encountered and values keep their input order — the
  // deterministic behavior that plain groupBy does not guarantee.
  val buckets = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
  xs.foreach { x =>
    buckets.getOrElseUpdate(f(x), mutable.ListBuffer.empty[A]) += x
  }
  buckets.view.map { case (k, vs) => k -> vs.toList }.toList
}
/** Accept either a single global setting (replicated n times) or exactly n
  * per-instance settings; anything else is a configuration error.
  */
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = {
  if (in.size == 1) List.fill(n)(in.head)
  else if (in.size == n) in
  else throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalond diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
} | module OptimizationBarrier_TLBEntryData(
  input  [19:0] io_x_ppn,
  input         io_x_u,
  input         io_x_ae_ptw,
  input         io_x_ae_final,
  input         io_x_pf,
  input         io_x_gf,
  input         io_x_sx,
  input         io_x_px,
  output [19:0] io_y_ppn,
  output        io_y_u,
  output        io_y_ae_ptw,
  output        io_y_ae_final,
  output        io_y_pf,
  output        io_y_gf,
  output        io_y_sx,
  output        io_y_px
);

  // Identity wiring: each io_y_* output mirrors its io_x_* input. The module
  // boundary itself is the point — per the name, it exists to keep tools from
  // optimizing across it (assumption from the name; no logic lives here).
  assign io_y_ppn = io_x_ppn;
  assign io_y_u = io_x_u;
  assign io_y_ae_ptw = io_x_ae_ptw;
  assign io_y_ae_final = io_x_ae_final;
  assign io_y_pf = io_x_pf;
  assign io_y_gf = io_x_gf;
  assign io_y_sx = io_x_sx;
  assign io_y_px = io_x_px;
endmodule
Generate the Verilog code corresponding to this Chisel code //******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
package boom.v3.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket.Instructions32
import freechips.rocketchip.rocket.CustomInstructions._
import freechips.rocketchip.rocket.RVCExpander
import freechips.rocketchip.rocket.{CSR,Causes}
import freechips.rocketchip.util.{uintToBitPat,UIntIsOneOf}
import FUConstants._
import boom.v3.common._
import boom.v3.util._
// scalastyle:off
/**
* Abstract trait giving defaults and other relevant values to different Decode constants/
*/
abstract trait DecodeConstants
extends freechips.rocketchip.rocket.constants.ScalarOpConstants
with freechips.rocketchip.rocket.constants.MemoryOpConstants
{
val xpr64 = Y // TODO inform this from xLen
val DC2 = BitPat.dontCare(2) // Makes the listing below more readable
def decode_default: List[BitPat] =
// frs3_en wakeup_delay
// is val inst? | imm sel | bypassable (aka, known/fixed latency)
// | is fp inst? | | uses_ldq | | is_br
// | | is single-prec? rs1 regtype | | | uses_stq | | |
// | | | micro-code | rs2 type| | | | is_amo | | |
// | | | | iq-type func unit | | | | | | | is_fence | | |
// | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall?
// | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
// | | | | | | | | | | | | | | | | | | | | | | | |
List(N, N, X, uopX , IQT_INT, FU_X , RT_X , DC2 ,DC2 ,X, IS_X, X, X, X, X, N, M_X, DC2, X, X, N, N, X, CSR.X)
val table: Array[(BitPat, List[BitPat])]
}
// scalastyle:on
/**
* Decoded control signals
*/
class CtrlSigs extends Bundle
{
  val legal = Bool()                // recognized, legal instruction
  val fp_val = Bool()               // floating-point instruction
  val fp_single = Bool()            // single-precision (when fp_val)
  val uopc = UInt(UOPC_SZ.W)        // micro-op code
  val iq_type = UInt(IQT_SZ.W)      // issue-queue type
  val fu_code = UInt(FUC_SZ.W)      // functional-unit code
  val dst_type = UInt(2.W)          // destination register type
  val rs1_type = UInt(2.W)
  val rs2_type = UInt(2.W)
  val frs3_en = Bool()              // reads a third (FP) source operand
  val imm_sel = UInt(IS_X.getWidth.W) // immediate format select
  val uses_ldq = Bool()
  val uses_stq = Bool()
  val is_amo = Bool()
  val is_fence = Bool()
  val is_fencei = Bool()
  val mem_cmd = UInt(freechips.rocketchip.rocket.M_SZ.W)
  val wakeup_delay = UInt(2.W)      // fixed-latency wakeup delay
  val bypassable = Bool()           // known/fixed latency, result bypassable
  val is_br = Bool()
  val is_sys_pc2epc = Bool()        // breakpoint or ecall
  val inst_unique = Bool()          // clear pipeline for it
  val flush_on_commit = Bool()
  val csr_cmd = UInt(freechips.rocketchip.rocket.CSR.SZ.W)
  val rocc = Bool()

  /** Decode `inst` against `table`, driving every field above in the listed
    * order; `rocc` is not in the table and defaults to false. Returns `this`.
    */
  def decode(inst: UInt, table: Iterable[(BitPat, List[BitPat])]) = {
    val decoder = freechips.rocketchip.rocket.DecodeLogic(inst, XDecode.decode_default, table)
    val sigs =
      Seq(legal, fp_val, fp_single, uopc, iq_type, fu_code, dst_type, rs1_type,
          rs2_type, frs3_en, imm_sel, uses_ldq, uses_stq, is_amo,
          is_fence, is_fencei, mem_cmd, wakeup_delay, bypassable,
          is_br, is_sys_pc2epc, inst_unique, flush_on_commit, csr_cmd)
    sigs zip decoder map {case(s,d) => s := d}
    rocc := false.B
    this
  }
}
// scalastyle:off
/**
* Decode constants for RV32
*/
object X32Decode extends DecodeConstants
{
// frs3_en wakeup_delay
// is val inst? | imm sel | bypassable (aka, known/fixed latency)
// | is fp inst? | | uses_ldq | | is_br
// | | is single-prec? rs1 regtype | | | uses_stq | | |
// | | | micro-code | rs2 type| | | | is_amo | | |
// | | | | iq-type func unit | | | | | | | is_fence | | |
// | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall?
// | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
val table: Array[(BitPat, List[BitPat])] = Array(// | | | | | | | | | | | | | | | | | |
Instructions32.SLLI ->
List(Y, N, X, uopSLLI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
Instructions32.SRLI ->
List(Y, N, X, uopSRLI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
Instructions32.SRAI ->
List(Y, N, X, uopSRAI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N)
)
}
/**
* Decode constants for RV64
*/
object X64Decode extends DecodeConstants
{
  // Column order matches CtrlSigs.decode:
  //   legal, fp_val, fp_single, uopc, iq_type, fu_code, dst_type, rs1_type,
  //   rs2_type, frs3_en, imm_sel, uses_ldq, uses_stq, is_amo, is_fence,
  //   is_fencei, mem_cmd, wakeup_delay, bypassable, is_br, is_sys_pc2epc,
  //   inst_unique, flush_on_commit, csr_cmd
  val table: Array[(BitPat, List[BitPat])] = Array(
    LD      -> List(Y, N, X, uopLD   , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X  , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
    LWU     -> List(Y, N, X, uopLD   , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X  , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
    SD      -> List(Y, N, X, uopSTA  , IQT_MEM, FU_MEM , RT_X  , RT_FIX, RT_FIX, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N),

    SLLI    -> List(Y, N, X, uopSLLI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
    SRLI    -> List(Y, N, X, uopSRLI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
    SRAI    -> List(Y, N, X, uopSRAI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),

    ADDIW   -> List(Y, N, X, uopADDIW, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
    SLLIW   -> List(Y, N, X, uopSLLIW, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
    SRAIW   -> List(Y, N, X, uopSRAIW, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
    SRLIW   -> List(Y, N, X, uopSRLIW, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),

    ADDW    -> List(Y, N, X, uopADDW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
    SUBW    -> List(Y, N, X, uopSUBW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
    SLLW    -> List(Y, N, X, uopSLLW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
    SRAW    -> List(Y, N, X, uopSRAW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
    // NOTE(review): SRLW uses IS_X where its siblings (SLLW/SRAW) use IS_I —
    // presumably imm_sel is a don't-care for register-register ops, but this
    // inconsistency should be confirmed against the upstream BOOM decode table.
    SRLW    -> List(Y, N, X, uopSRLW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N)
  )
}
/**
 * Overall Decode constants
 *
 * Decode table for the base integer ISA (loads/stores, ALU ops, jumps and
 * branches), the M extension (multiply/divide), the A extension (AMOs and
 * LR/SC), CSR accesses, and system/fence instructions.  Each row maps an
 * instruction bit-pattern to the list of control signals described by the
 * column-header comment below; DecodeUnit matches an instruction against
 * the rows and expands the winning row into a CtrlSigs bundle.
 */
object XDecode extends DecodeConstants
{
            //                                                                  frs3_en                        wakeup_delay
            // is val inst?                                                     |  imm sel                     |    bypassable (aka, known/fixed latency)
            // |  is fp inst?                                                   |  |     uses_ldq              |    |  is_br
            // |  |  is single-prec?                        rs1 regtype         |  |     |  uses_stq           |    |  |
            // |  |  |  micro-code                          |       rs2 type    |  |     |  |  is_amo          |    |  |
            // |  |  |  |         iq-type  func unit        |       |           |  |     |  |  |  is_fence     |    |  |
            // |  |  |  |         |        |                |       |           |  |     |  |  |  |  is_fencei |    |  |  is breakpoint or ecall?
            // |  |  |  |         |        |        dst     |       |           |  |     |  |  |  |  |  mem    |    |  |  |  is unique? (clear pipeline for it)
            // |  |  |  |         |        |        regtype |       |           |  |     |  |  |  |  |  cmd    |    |  |  |  |  flush on commit
            // |  |  |  |         |        |        |       |       |           |  |     |  |  |  |  |  |      |    |  |  |  |  |  csr cmd
  val table: Array[(BitPat, List[BitPat])] = Array(
  // Integer loads (wakeup_delay of 3 models the load-to-use latency).
  LW      -> List(Y, N, X, uopLD   , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X  , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
  LH      -> List(Y, N, X, uopLD   , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X  , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
  LHU     -> List(Y, N, X, uopLD   , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X  , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
  LB      -> List(Y, N, X, uopLD   , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X  , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
  LBU     -> List(Y, N, X, uopLD   , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X  , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
  // Integer stores (no destination register; rs2 holds the store data).
  SW      -> List(Y, N, X, uopSTA  , IQT_MEM, FU_MEM , RT_X  , RT_FIX, RT_FIX, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N),
  SH      -> List(Y, N, X, uopSTA  , IQT_MEM, FU_MEM , RT_X  , RT_FIX, RT_FIX, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N),
  SB      -> List(Y, N, X, uopSTA  , IQT_MEM, FU_MEM , RT_X  , RT_FIX, RT_FIX, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N),
  // Single-cycle integer ALU ops: bypassable with a wakeup_delay of 1.
  LUI     -> List(Y, N, X, uopLUI  , IQT_INT, FU_ALU , RT_FIX, RT_X  , RT_X  , N, IS_U, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  ADDI    -> List(Y, N, X, uopADDI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  ANDI    -> List(Y, N, X, uopANDI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  ORI     -> List(Y, N, X, uopORI  , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  XORI    -> List(Y, N, X, uopXORI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  SLTI    -> List(Y, N, X, uopSLTI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  SLTIU   -> List(Y, N, X, uopSLTIU, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  // NOTE: imm_sel is a don't-care for register-register ops (some rows say
  // IS_I, others IS_X; both are unused since no immediate is consumed).
  SLL     -> List(Y, N, X, uopSLL  , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  ADD     -> List(Y, N, X, uopADD  , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  SUB     -> List(Y, N, X, uopSUB  , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  SLT     -> List(Y, N, X, uopSLT  , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  SLTU    -> List(Y, N, X, uopSLTU , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  AND     -> List(Y, N, X, uopAND  , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  OR      -> List(Y, N, X, uopOR   , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  XOR     -> List(Y, N, X, uopXOR  , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  SRA     -> List(Y, N, X, uopSRA  , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  SRL     -> List(Y, N, X, uopSRL  , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 1.U, Y, N, N, N, N, CSR.N),
  // M extension: multiplies go to the pipelined multiplier, divides to the
  // iterative divider; neither is bypassable (variable/long latency).
  MUL     -> List(Y, N, X, uopMUL  , IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  MULH    -> List(Y, N, X, uopMULH , IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  MULHU   -> List(Y, N, X, uopMULHU, IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  MULHSU  -> List(Y, N, X, uopMULHSU,IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  MULW    -> List(Y, N, X, uopMULW , IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  DIV     -> List(Y, N, X, uopDIV  , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  DIVU    -> List(Y, N, X, uopDIVU , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  REM     -> List(Y, N, X, uopREM  , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  REMU    -> List(Y, N, X, uopREMU , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  DIVW    -> List(Y, N, X, uopDIVW , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  DIVUW   -> List(Y, N, X, uopDIVUW, IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  REMW    -> List(Y, N, X, uopREMW , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  REMUW   -> List(Y, N, X, uopREMUW, IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  // PC-relative ops and control flow.
  AUIPC   -> List(Y, N, X, uopAUIPC, IQT_INT, FU_JMP , RT_FIX, RT_X  , RT_X  , N, IS_U, N, N, N, N, N, M_X  , 1.U, N, N, N, N, N, CSR.N), // use BRU for the PC read
  JAL     -> List(Y, N, X, uopJAL  , IQT_INT, FU_JMP , RT_FIX, RT_X  , RT_X  , N, IS_J, N, N, N, N, N, M_X  , 1.U, N, N, N, N, N, CSR.N),
  JALR    -> List(Y, N, X, uopJALR , IQT_INT, FU_JMP , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 1.U, N, N, N, N, N, CSR.N),
  BEQ     -> List(Y, N, X, uopBEQ  , IQT_INT, FU_ALU , RT_X  , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X  , 0.U, N, Y, N, N, N, CSR.N),
  BNE     -> List(Y, N, X, uopBNE  , IQT_INT, FU_ALU , RT_X  , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X  , 0.U, N, Y, N, N, N, CSR.N),
  BGE     -> List(Y, N, X, uopBGE  , IQT_INT, FU_ALU , RT_X  , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X  , 0.U, N, Y, N, N, N, CSR.N),
  BGEU    -> List(Y, N, X, uopBGEU , IQT_INT, FU_ALU , RT_X  , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X  , 0.U, N, Y, N, N, N, CSR.N),
  BLT     -> List(Y, N, X, uopBLT  , IQT_INT, FU_ALU , RT_X  , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X  , 0.U, N, Y, N, N, N, CSR.N),
  BLTU    -> List(Y, N, X, uopBLTU , IQT_INT, FU_ALU , RT_X  , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X  , 0.U, N, Y, N, N, N, CSR.N),
  // I-type, the immediate12 holds the CSR register.
  // CSR ops are "unique" and flush-on-commit (serialize the pipeline).
  CSRRW   -> List(Y, N, X, uopCSRRW, IQT_INT, FU_CSR , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, Y, Y, CSR.W),
  CSRRS   -> List(Y, N, X, uopCSRRS, IQT_INT, FU_CSR , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, Y, Y, CSR.S),
  CSRRC   -> List(Y, N, X, uopCSRRC, IQT_INT, FU_CSR , RT_FIX, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, Y, Y, CSR.C),
  // Immediate CSR ops pass the zimm field through the rs1 specifier (RT_PAS).
  CSRRWI  -> List(Y, N, X, uopCSRRWI,IQT_INT, FU_CSR , RT_FIX, RT_PAS, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, Y, Y, CSR.W),
  CSRRSI  -> List(Y, N, X, uopCSRRSI,IQT_INT, FU_CSR , RT_FIX, RT_PAS, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, Y, Y, CSR.S),
  CSRRCI  -> List(Y, N, X, uopCSRRCI,IQT_INT, FU_CSR , RT_FIX, RT_PAS, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, Y, Y, CSR.C),
  // System instructions (ECALL/EBREAK additionally set is_sys_pc2epc).
  SFENCE_VMA->List(Y,N, X, uopSFENCE,IQT_MEM, FU_MEM , RT_X  , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N,M_SFENCE,0.U,N, N, N, Y, Y, CSR.N),
  ECALL   -> List(Y, N, X, uopERET  ,IQT_INT, FU_CSR , RT_X  , RT_X  , RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, Y, Y, Y, CSR.I),
  EBREAK  -> List(Y, N, X, uopERET  ,IQT_INT, FU_CSR , RT_X  , RT_X  , RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, Y, Y, Y, CSR.I),
  SRET    -> List(Y, N, X, uopERET  ,IQT_INT, FU_CSR , RT_X  , RT_X  , RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, Y, Y, CSR.I),
  MRET    -> List(Y, N, X, uopERET  ,IQT_INT, FU_CSR , RT_X  , RT_X  , RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, Y, Y, CSR.I),
  DRET    -> List(Y, N, X, uopERET  ,IQT_INT, FU_CSR , RT_X  , RT_X  , RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, Y, Y, CSR.I),
  WFI     -> List(Y, N, X, uopWFI   ,IQT_INT, FU_CSR , RT_X  , RT_X  , RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, Y, Y, CSR.I),
  FENCE_I -> List(Y, N, X, uopNOP  , IQT_INT, FU_X   , RT_X  , RT_X  , RT_X  , N, IS_X, N, N, N, N, Y, M_X  , 0.U, N, N, N, Y, Y, CSR.N),
  FENCE   -> List(Y, N, X, uopFENCE, IQT_INT, FU_MEM , RT_X  , RT_X  , RT_X  , N, IS_X, N, Y, N, Y, N, M_X  , 0.U, N, N, N, Y, Y, CSR.N), // TODO PERF make fence higher performance
                                                                                                                                          // currently serializes pipeline
            //                                                                  frs3_en                        wakeup_delay
            // is val inst?                                                     |  imm sel                     |    bypassable (aka, known/fixed latency)
            // |  is fp inst?                                                   |  |     uses_ldq              |    |  is_br
            // |  |  is single-prec?                        rs1 regtype         |  |     |  uses_stq           |    |  |
            // |  |  |  micro-code                          |       rs2 type    |  |     |  |  is_amo          |    |  |
            // |  |  |  |         iq-type  func unit        |       |           |  |     |  |  |  is_fence     |    |  |
            // |  |  |  |         |        |                |       |           |  |     |  |  |  |  is_fencei |    |  |  is breakpoint or ecall?
            // |  |  |  |         |        |        dst     |       |           |  |     |  |  |  |  |  mem    |    |  |  |  is unique? (clear pipeline for it)
            // |  |  |  |         |        |        regtype |       |           |  |     |  |  |  |  |  cmd    |    |  |  |  |  flush on commit
            // |  |  |  |         |        |        |       |       |           |  |     |  |  |  |  |  |      |    |  |  |  |  |  csr cmd
  // A-type: atomics set both uses_ldq-style addressing via the STQ path
  // (uses_stq + is_amo) and serialize the pipeline (unique + flush).
  AMOADD_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_ADD, 0.U,N, N, N, Y, Y, CSR.N), // TODO make AMOs higher performance
  AMOXOR_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_XOR, 0.U,N, N, N, Y, Y, CSR.N),
  AMOSWAP_W->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_SWAP,0.U,N, N, N, Y, Y, CSR.N),
  AMOAND_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_AND, 0.U,N, N, N, Y, Y, CSR.N),
  AMOOR_W -> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_OR,  0.U,N, N, N, Y, Y, CSR.N),
  AMOMIN_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MIN, 0.U,N, N, N, Y, Y, CSR.N),
  AMOMINU_W->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MINU,0.U,N, N, N, Y, Y, CSR.N),
  AMOMAX_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MAX, 0.U,N, N, N, Y, Y, CSR.N),
  AMOMAXU_W->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MAXU,0.U,N, N, N, Y, Y, CSR.N),
  AMOADD_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_ADD, 0.U,N, N, N, Y, Y, CSR.N),
  AMOXOR_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_XOR, 0.U,N, N, N, Y, Y, CSR.N),
  AMOSWAP_D->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_SWAP,0.U,N, N, N, Y, Y, CSR.N),
  AMOAND_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_AND, 0.U,N, N, N, Y, Y, CSR.N),
  AMOOR_D -> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_OR,  0.U,N, N, N, Y, Y, CSR.N),
  AMOMIN_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MIN, 0.U,N, N, N, Y, Y, CSR.N),
  AMOMINU_D->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MINU,0.U,N, N, N, Y, Y, CSR.N),
  AMOMAX_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MAX, 0.U,N, N, N, Y, Y, CSR.N),
  AMOMAXU_D->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MAXU,0.U,N, N, N, Y, Y, CSR.N),
  // LR behaves as a load; SC goes down the AMO/store path.
  LR_W    -> List(Y, N, X, uopLD    , IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_X  , N, IS_X, Y, N, N, N, N, M_XLR   , 0.U,N, N, N, Y, Y, CSR.N),
  LR_D    -> List(Y, N, X, uopLD    , IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_X  , N, IS_X, Y, N, N, N, N, M_XLR   , 0.U,N, N, N, Y, Y, CSR.N),
  SC_W    -> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XSC   , 0.U,N, N, N, Y, Y, CSR.N),
  SC_D    -> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XSC   , 0.U,N, N, N, Y, Y, CSR.N)
  )
}
/**
 * FP Decode constants
 *
 * Decode table for the F/D extensions (excluding divide/square-root, which
 * live in FDivSqrtDecode).  The "is single-prec" column (fp_single) is only
 * meaningful for FP-valued destinations and is used for writeback-data
 * formatting.  FP loads/stores travel through the memory queues like their
 * integer counterparts; FP stores issue from the FP register-read path
 * (FU_F2IMEM) since the store data lives in the FP register file.
 */
object FDecode extends DecodeConstants
{
  val table: Array[(BitPat, List[BitPat])] = Array(
            //                                                         frs3_en                        wakeup_delay
            //                                                         |  imm sel                     |    bypassable (aka, known/fixed latency)
            //                                                         |  |     uses_ldq              |    |  is_br
            // is val inst?                         rs1 regtype        |  |     |  uses_stq           |    |  |
            // |  is fp inst?                       |       rs2 type   |  |     |  |  is_amo          |    |  |
            // |  |  is dst single-prec?            |       |          |  |     |  |  |  is_fence     |    |  |
            // |  |  |  micro-opcode                |       |          |  |     |  |  |  |  is_fencei |    |  |  is breakpoint or ecall
            // |  |  |  |           iq_type  func   |       |          |  |     |  |  |  |  |  mem    |    |  |  |  is unique? (clear pipeline for it)
            // |  |  |  |           |        unit   |       |          |  |     |  |  |  |  |  cmd    |    |  |  |  |  flush on commit
            // |  |  |  |           |        |      |       |          |  |     |  |  |  |  |  |      |    |  |  |  |  |  csr cmd
  // FP loads and stores.
  FLW     -> List(Y, Y, Y, uopLD     , IQT_MEM, FU_MEM, RT_FLT, RT_FIX, RT_X  , N, IS_I, Y, N, N, N, N, M_XRD, 0.U, N, N, N, N, N, CSR.N),
  FLD     -> List(Y, Y, N, uopLD     , IQT_MEM, FU_MEM, RT_FLT, RT_FIX, RT_X  , N, IS_I, Y, N, N, N, N, M_XRD, 0.U, N, N, N, N, N, CSR.N),
  FSW     -> List(Y, Y, Y, uopSTA    , IQT_MFP,FU_F2IMEM,RT_X , RT_FIX, RT_FLT, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N), // sort of a lie; broken into two micro-ops
  FSD     -> List(Y, Y, N, uopSTA    , IQT_MFP,FU_F2IMEM,RT_X , RT_FIX, RT_FLT, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N),
  // Classification and bit-level moves between register files.
  FCLASS_S-> List(Y, Y, Y, uopFCLASS_S,IQT_FP , FU_F2I, RT_FIX, RT_FLT, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FCLASS_D-> List(Y, Y, N, uopFCLASS_D,IQT_FP , FU_F2I, RT_FIX, RT_FLT, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FMV_W_X -> List(Y, Y, Y, uopFMV_W_X, IQT_INT, FU_I2F, RT_FLT, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FMV_D_X -> List(Y, Y, N, uopFMV_D_X, IQT_INT, FU_I2F, RT_FLT, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FMV_X_W -> List(Y, Y, Y, uopFMV_X_W, IQT_FP , FU_F2I, RT_FIX, RT_FLT, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FMV_X_D -> List(Y, Y, N, uopFMV_X_D, IQT_FP , FU_F2I, RT_FIX, RT_FLT, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  // Sign-injection: all three variants share one uop; the FPU distinguishes
  // them with the instruction's rounding-mode field.
  FSGNJ_S -> List(Y, Y, Y, uopFSGNJ_S, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FSGNJ_D -> List(Y, Y, N, uopFSGNJ_D, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FSGNJX_S-> List(Y, Y, Y, uopFSGNJ_S, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FSGNJX_D-> List(Y, Y, N, uopFSGNJ_D, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FSGNJN_S-> List(Y, Y, Y, uopFSGNJ_S, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FSGNJN_D-> List(Y, Y, N, uopFSGNJ_D, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  // FP to FP
  FCVT_S_D-> List(Y, Y, Y, uopFCVT_S_D,IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FCVT_D_S-> List(Y, Y, N, uopFCVT_D_S,IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  // Int to FP
  FCVT_S_W-> List(Y, Y, Y, uopFCVT_S_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FCVT_S_WU->List(Y, Y, Y, uopFCVT_S_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FCVT_S_L-> List(Y, Y, Y, uopFCVT_S_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FCVT_S_LU->List(Y, Y, Y, uopFCVT_S_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FCVT_D_W-> List(Y, Y, N, uopFCVT_D_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FCVT_D_WU->List(Y, Y, N, uopFCVT_D_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FCVT_D_L-> List(Y, Y, N, uopFCVT_D_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FCVT_D_LU->List(Y, Y, N, uopFCVT_D_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  // FP to Int
  FCVT_W_S-> List(Y, Y, Y, uopFCVT_X_S, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FCVT_WU_S->List(Y, Y, Y, uopFCVT_X_S, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FCVT_L_S-> List(Y, Y, Y, uopFCVT_X_S, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FCVT_LU_S->List(Y, Y, Y, uopFCVT_X_S, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FCVT_W_D-> List(Y, Y, N, uopFCVT_X_D, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FCVT_WU_D->List(Y, Y, N, uopFCVT_X_D, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FCVT_L_D-> List(Y, Y, N, uopFCVT_X_D, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FCVT_LU_D->List(Y, Y, N, uopFCVT_X_D, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X  , N, IS_I, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  // "fp_single" is used for wb_data formatting (and debugging)
  // FP comparisons write an integer (0/1) destination.
  FEQ_S    ->List(Y, Y, Y, uopCMPR_S , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FLT_S    ->List(Y, Y, Y, uopCMPR_S , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FLE_S    ->List(Y, Y, Y, uopCMPR_S , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FEQ_D    ->List(Y, Y, N, uopCMPR_D , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FLT_D    ->List(Y, Y, N, uopCMPR_D , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FLE_D    ->List(Y, Y, N, uopCMPR_D , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  // FP arithmetic (min/max share a uop per precision, like sign-injection).
  FMIN_S   ->List(Y, Y, Y,uopFMINMAX_S,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FMAX_S   ->List(Y, Y, Y,uopFMINMAX_S,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FMIN_D   ->List(Y, Y, N,uopFMINMAX_D,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FMAX_D   ->List(Y, Y, N,uopFMINMAX_D,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FADD_S   ->List(Y, Y, Y, uopFADD_S , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FSUB_S   ->List(Y, Y, Y, uopFSUB_S , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FMUL_S   ->List(Y, Y, Y, uopFMUL_S , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FADD_D   ->List(Y, Y, N, uopFADD_D , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FSUB_D   ->List(Y, Y, N, uopFSUB_D , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FMUL_D   ->List(Y, Y, N, uopFMUL_D , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  // Fused multiply-add family: the only rows that read a third FP source
  // operand (frs3_en = Y).
  FMADD_S  ->List(Y, Y, Y, uopFMADD_S, IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FMSUB_S  ->List(Y, Y, Y, uopFMSUB_S, IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FNMADD_S ->List(Y, Y, Y, uopFNMADD_S,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FNMSUB_S ->List(Y, Y, Y, uopFNMSUB_S,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FMADD_D  ->List(Y, Y, N, uopFMADD_D, IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FMSUB_D  ->List(Y, Y, N, uopFMSUB_D, IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FNMADD_D ->List(Y, Y, N, uopFNMADD_D,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FNMSUB_D ->List(Y, Y, N, uopFNMSUB_D,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N)
  )
}
/**
 * FP Divide SquareRoot Constants
 *
 * Kept separate from FDecode so the table is only appended when the design
 * instantiates an FDiv/FSqrt unit (usingFPU && usingFDivSqrt); all rows
 * target the iterative FU_FDV unit and are not bypassable.
 */
object FDivSqrtDecode extends DecodeConstants
{
  val table: Array[(BitPat, List[BitPat])] = Array(
            //                                                        frs3_en                        wakeup_delay
            //                                                        |  imm sel                     |    bypassable (aka, known/fixed latency)
            //                                                        |  |     uses_ldq              |    |  is_br
            // is val inst?                        rs1 regtype        |  |     |  uses_stq           |    |  |
            // |  is fp inst?                      |       rs2 type   |  |     |  |  is_amo          |    |  |
            // |  |  is dst single-prec?           |       |          |  |     |  |  |  is_fence     |    |  |
            // |  |  |  micro-opcode               |       |          |  |     |  |  |  |  is_fencei |    |  |  is breakpoint or ecall
            // |  |  |  |           iq-type  func  |       |          |  |     |  |  |  |  |  mem    |    |  |  |  is unique? (clear pipeline for it)
            // |  |  |  |           |        unit  |       |          |  |     |  |  |  |  |  cmd    |    |  |  |  |  flush on commit
            // |  |  |  |           |        |     |       |          |  |     |  |  |  |  |  |      |    |  |  |  |  |  csr cmd
  FDIV_S   ->List(Y, Y, Y, uopFDIV_S , IQT_FP, FU_FDV, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FDIV_D   ->List(Y, Y, N, uopFDIV_D , IQT_FP, FU_FDV, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  // Square root only reads one source operand.
  FSQRT_S  ->List(Y, Y, Y, uopFSQRT_S, IQT_FP, FU_FDV, RT_FLT, RT_FLT, RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  FSQRT_D  ->List(Y, Y, N, uopFSQRT_D, IQT_FP, FU_FDV, RT_FLT, RT_FLT, RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N)
  )
}
//scalastyle:on
/**
 * RoCC initial decode
 *
 * Decode table for the four custom-opcode (RoCC accelerator) instruction
 * groups.  Every variant decodes to the same uopROCC; the _RS1/_RS2/_RD
 * suffixes only differ in which register operands are read/written, which
 * is captured by the dst/rs1/rs2 regtype columns.
 */
object RoCCDecode extends DecodeConstants
{
  // Note: We use FU_CSR since CSR instructions cannot co-execute with RoCC instructions
            //                                                                     frs3_en                        wakeup_delay
            // is val inst?                                                        |  imm sel                     |    bypassable (aka, known/fixed latency)
            // |  is fp inst?                                                      |  |     uses_ldq              |    |  is_br
            // |  |  is single-prec                           rs1 regtype          |  |     |  uses_stq           |    |  |
            // |  |  |                                        |       rs2 type     |  |     |  |  is_amo          |    |  |
            // |  |  |  micro-code           func unit        |       |            |  |     |  |  |  is_fence     |    |  |
            // |  |  |  |           iq-type  |                |       |            |  |     |  |  |  |  is_fencei |    |  |  is breakpoint or ecall?
            // |  |  |  |           |        |        dst     |       |            |  |     |  |  |  |  |  mem    |    |  |  |  is unique? (clear pipeline for it)
            // |  |  |  |           |        |        regtype |       |            |  |     |  |  |  |  |  cmd    |    |  |  |  |  flush on commit
            // |  |  |  |           |        |        |       |       |            |  |     |  |  |  |  |  |      |    |  |  |  |  |  csr cmd
  val table: Array[(BitPat, List[BitPat])] = Array(
  CUSTOM0            ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_X  , RT_X  , RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM0_RS1        ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_X  , RT_FIX, RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM0_RS1_RS2    ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_X  , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM0_RD         ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_FIX, RT_X  , RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM0_RD_RS1     ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM0_RD_RS1_RS2 ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM1            ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_X  , RT_X  , RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM1_RS1        ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_X  , RT_FIX, RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM1_RS1_RS2    ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_X  , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM1_RD         ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_FIX, RT_X  , RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM1_RD_RS1     ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM1_RD_RS1_RS2 ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM2            ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_X  , RT_X  , RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM2_RS1        ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_X  , RT_FIX, RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM2_RS1_RS2    ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_X  , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM2_RD         ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_FIX, RT_X  , RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM2_RD_RS1     ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM2_RD_RS1_RS2 ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM3            ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_X  , RT_X  , RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM3_RS1        ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_X  , RT_FIX, RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM3_RS1_RS2    ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_X  , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM3_RD         ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_FIX, RT_X  , RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM3_RD_RS1     ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_X  , N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N),
  CUSTOM3_RD_RS1_RS2 ->List(Y, N, X, uopROCC   , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X  , 0.U, N, N, N, N, N, CSR.N)
  )
}
/**
 * IO bundle for the Decode unit
 *
 * Carries one pre-decode MicroOp in (enq) and the fully-decoded MicroOp
 * out (deq), plus the CSR/interrupt state decode needs to raise exceptions.
 */
class DecodeUnitIo(implicit p: Parameters) extends BoomBundle
{
  // enq.uop: MicroOp from fetch (inst + PC/branch metadata already filled in).
  val enq = new Bundle { val uop = Input(new MicroOp()) }
  // deq.uop: the same MicroOp with all decode-derived fields populated.
  val deq = new Bundle { val uop = Output(new MicroOp()) }
  // from CSRFile
  val status = Input(new freechips.rocketchip.rocket.MStatus())
  // Legality checks (privilege/read/write) for the current CSR instruction.
  val csr_decode = Flipped(new freechips.rocketchip.rocket.CSRDecodeIO)
  // Pending interrupt and its cause, folded into decode's exception logic.
  val interrupt = Input(Bool())
  val interrupt_cause = Input(UInt(xLen.W))
}
/**
 * Decode unit that takes in a single instruction and generates a MicroOp.
 *
 * The incoming MicroOp (io.enq.uop) is copied into a Wire and then its
 * decode-derived fields are overwritten field-by-field; because Chisel uses
 * last-connect semantics, the `when` blocks below deliberately override the
 * unconditional defaults assigned above them.
 */
class DecodeUnit(implicit p: Parameters) extends BoomModule
  with freechips.rocketchip.rocket.constants.MemoryOpConstants
{
  val io = IO(new DecodeUnitIo)
  val uop = Wire(new MicroOp())
  uop := io.enq.uop
  // Assemble the decode table from the extensions this configuration enables.
  var decode_table = XDecode.table
  if (usingFPU) decode_table ++= FDecode.table
  if (usingFPU && usingFDivSqrt) decode_table ++= FDivSqrtDecode.table
  if (usingRoCC) decode_table ++= RoCCDecode.table
  decode_table ++= (if (xLen == 64) X64Decode.table else X32Decode.table)
  val inst = uop.inst
  // Match the instruction against the table, yielding the CtrlSigs bundle.
  val cs = Wire(new CtrlSigs()).decode(inst, decode_table)
  // Exception Handling
  io.csr_decode.inst := inst
  val csr_en = cs.csr_cmd.isOneOf(CSR.S, CSR.C, CSR.W)
  // CSRRS/CSRRC with rs1 = x0 are read-only accesses (no write side effect),
  // so they need not pass the write-legality check below.
  val csr_ren = cs.csr_cmd.isOneOf(CSR.S, CSR.C) && uop.lrs1 === 0.U
  val system_insn = cs.csr_cmd === CSR.I
  val sfence = cs.uopc === uopSFENCE
  val cs_legal = cs.legal
//   dontTouch(cs_legal)
  // Illegal if: not in the decode table, an FP/RoCC op the config forbids,
  // an AMO without the A extension, a double-precision op without D, an
  // impermissible CSR access, or an SFENCE/system op the CSR file rejects.
  val id_illegal_insn = !cs_legal ||
    cs.fp_val && io.csr_decode.fp_illegal || // TODO check for illegal rm mode: (io.fpu.illegal_rm)
    cs.rocc && io.csr_decode.rocc_illegal ||
    cs.is_amo && !io.status.isa('a'-'a')  ||
    (cs.fp_val && !cs.fp_single) && !io.status.isa('d'-'a') ||
    csr_en && (io.csr_decode.read_illegal || !csr_ren && io.csr_decode.write_illegal) ||
    ((sfence || system_insn) && io.csr_decode.system_illegal)
//     cs.div && !csr.io.status.isa('m'-'a') || TODO check for illegal div instructions
  // Returns (any exception pending, highest-priority cause) — PriorityMux
  // picks the first asserted entry, so list order encodes priority.
  def checkExceptions(x: Seq[(Bool, UInt)]) =
    (x.map(_._1).reduce(_||_), PriorityMux(x))
  val (xcpt_valid, xcpt_cause) = checkExceptions(List(
    (io.interrupt && !io.enq.uop.is_sfb, io.interrupt_cause), // Disallow interrupts while we are handling a SFB
    (uop.bp_debug_if, (CSR.debugTriggerCause).U),
    (uop.bp_xcpt_if, (Causes.breakpoint).U),
    (uop.xcpt_pf_if, (Causes.fetch_page_fault).U),
    (uop.xcpt_ae_if, (Causes.fetch_access).U),
    (id_illegal_insn, (Causes.illegal_instruction).U)))
  uop.exception := xcpt_valid
  uop.exc_cause := xcpt_cause
  //-------------------------------------------------------------
  uop.uopc := cs.uopc
  uop.iq_type := cs.iq_type
  uop.fu_code := cs.fu_code
  // x-registers placed in 0-31, f-registers placed in 32-63.
  // This allows us to straight-up compare register specifiers and not need to
  // verify the rtypes (e.g., bypassing in rename).
  uop.ldst := inst(RD_MSB,RD_LSB)
  uop.lrs1 := inst(RS1_MSB,RS1_LSB)
  uop.lrs2 := inst(RS2_MSB,RS2_LSB)
  uop.lrs3 := inst(RS3_MSB,RS3_LSB)
  // Writes to integer x0 are discarded (no real destination).
  uop.ldst_val := cs.dst_type =/= RT_X && !(uop.ldst === 0.U && uop.dst_rtype === RT_FIX)
  uop.dst_rtype := cs.dst_type
  uop.lrs1_rtype := cs.rs1_type
  uop.lrs2_rtype := cs.rs2_type
  uop.frs3_en := cs.frs3_en
  uop.ldst_is_rs1 := uop.is_sfb_shadow
  // SFB optimization
  // A shadowed op inside a short-forward-branch is turned into a predicated
  // op: its old destination value is read as an extra source so the result
  // can be selected on the (not-taken) predicate.  These `when` blocks
  // intentionally override the defaults assigned just above (last-connect).
  when (uop.is_sfb_shadow && cs.rs2_type === RT_X) {
    uop.lrs2_rtype := RT_FIX
    uop.lrs2 := inst(RD_MSB,RD_LSB)
    uop.ldst_is_rs1 := false.B
  } .elsewhen (uop.is_sfb_shadow && cs.uopc === uopADD && inst(RS1_MSB,RS1_LSB) === 0.U) {
    // add rd, x0, rs2 inside a shadow becomes a conditional move.
    uop.uopc := uopMOV
    uop.lrs1 := inst(RD_MSB, RD_LSB)
    uop.ldst_is_rs1 := true.B
  }
  // The SFB branch itself executes on the jump unit.
  when (uop.is_sfb_br) {
    uop.fu_code := FU_JMP
  }
  uop.fp_val := cs.fp_val
  uop.fp_single := cs.fp_single // TODO use this signal instead of the FPU decode's table signal?
  uop.mem_cmd := cs.mem_cmd
  // For SFENCE/flush, mem_size repurposes its bits to record whether rs1/rs2
  // are non-zero (i.e., address-/ASID-specific fence); otherwise it is the
  // size field from the instruction encoding.
  uop.mem_size := Mux(cs.mem_cmd.isOneOf(M_SFENCE, M_FLUSH_ALL), Cat(uop.lrs2 =/= 0.U, uop.lrs1 =/= 0.U), inst(13,12))
  uop.mem_signed := !inst(14)
  uop.uses_ldq := cs.uses_ldq
  uop.uses_stq := cs.uses_stq
  uop.is_amo := cs.is_amo
  uop.is_fence := cs.is_fence
  uop.is_fencei := cs.is_fencei
  uop.is_sys_pc2epc := cs.is_sys_pc2epc
  uop.is_unique := cs.inst_unique
  // Also flush after CSR writes the CSR file flags as pipeline-visible.
  uop.flush_on_commit := cs.flush_on_commit || (csr_en && !csr_ren && io.csr_decode.write_flush)
  uop.bypassable := cs.bypassable
  //-------------------------------------------------------------
  // immediates
  // repackage the immediate, and then pass the fewest number of bits around
  // (B/S formats carry imm[4:0] in the rd field; others in inst[24:20]).
  val di24_20 = Mux(cs.imm_sel === IS_B || cs.imm_sel === IS_S, inst(11,7), inst(24,20))
  uop.imm_packed := Cat(inst(31,25), di24_20, inst(19,12))
  //-------------------------------------------------------------
  uop.is_br := cs.is_br
  uop.is_jal := (uop.uopc === uopJAL)
  uop.is_jalr := (uop.uopc === uopJALR)
  // uop.is_jump := cs.is_jal || (uop.uopc === uopJALR)
  // uop.is_ret := (uop.uopc === uopJALR) &&
  // (uop.ldst === X0) &&
  // (uop.lrs1 === RA)
  // uop.is_call := (uop.uopc === uopJALR || uop.uopc === uopJAL) &&
  // (uop.ldst === RA)
  //-------------------------------------------------------------
  io.deq.uop := uop
}
/**
 * Smaller Decode unit for the Frontend to decode different
 * branches.
 * Accepts EXPANDED RVC instructions
 */
class BranchDecodeSignals(implicit p: Parameters) extends BoomBundle
{
  // Does this look like a function return (jalr to the return-address reg)?
  val is_ret   = Bool()
  // Does this look like a function call (jal/jalr writing the RA register)?
  val is_call  = Bool()
  // Branch/jump target computed from the PC and the instruction immediate.
  val target = UInt(vaddrBitsExtended.W)
  // Control-flow-instruction class (branch / jal / jalr / none).
  val cfi_type = UInt(CFI_SZ.W)

  // Is this branch a short forwards jump?
  val sfb_offset = Valid(UInt(log2Ceil(icBlockBytes).W))
  // Is this instruction allowed to be inside a sfb?
  val shadowable = Bool()
}
class BranchDecode(implicit p: Parameters) extends BoomModule
{
val io = IO(new Bundle {
val inst = Input(UInt(32.W))
val pc = Input(UInt(vaddrBitsExtended.W))
val out = Output(new BranchDecodeSignals)
})
val bpd_csignals =
freechips.rocketchip.rocket.DecodeLogic(io.inst,
List[BitPat](N, N, N, N, X),
//// is br?
//// | is jal?
//// | | is jalr?
//// | | |
//// | | | shadowable
//// | | | | has_rs2
//// | | | | |
Array[(BitPat, List[BitPat])](
JAL -> List(N, Y, N, N, X),
JALR -> List(N, N, Y, N, X),
BEQ -> List(Y, N, N, N, X),
BNE -> List(Y, N, N, N, X),
BGE -> List(Y, N, N, N, X),
BGEU -> List(Y, N, N, N, X),
BLT -> List(Y, N, N, N, X),
BLTU -> List(Y, N, N, N, X),
SLLI -> List(N, N, N, Y, N),
SRLI -> List(N, N, N, Y, N),
SRAI -> List(N, N, N, Y, N),
ADDIW -> List(N, N, N, Y, N),
SLLIW -> List(N, N, N, Y, N),
SRAIW -> List(N, N, N, Y, N),
SRLIW -> List(N, N, N, Y, N),
ADDW -> List(N, N, N, Y, Y),
SUBW -> List(N, N, N, Y, Y),
SLLW -> List(N, N, N, Y, Y),
SRAW -> List(N, N, N, Y, Y),
SRLW -> List(N, N, N, Y, Y),
LUI -> List(N, N, N, Y, N),
ADDI -> List(N, N, N, Y, N),
ANDI -> List(N, N, N, Y, N),
ORI -> List(N, N, N, Y, N),
XORI -> List(N, N, N, Y, N),
SLTI -> List(N, N, N, Y, N),
SLTIU -> List(N, N, N, Y, N),
SLL -> List(N, N, N, Y, Y),
ADD -> List(N, N, N, Y, Y),
SUB -> List(N, N, N, Y, Y),
SLT -> List(N, N, N, Y, Y),
SLTU -> List(N, N, N, Y, Y),
AND -> List(N, N, N, Y, Y),
OR -> List(N, N, N, Y, Y),
XOR -> List(N, N, N, Y, Y),
SRA -> List(N, N, N, Y, Y),
SRL -> List(N, N, N, Y, Y)
))
val cs_is_br = bpd_csignals(0)(0)
val cs_is_jal = bpd_csignals(1)(0)
val cs_is_jalr = bpd_csignals(2)(0)
val cs_is_shadowable = bpd_csignals(3)(0)
val cs_has_rs2 = bpd_csignals(4)(0)
io.out.is_call := (cs_is_jal || cs_is_jalr) && GetRd(io.inst) === RA
io.out.is_ret := cs_is_jalr && GetRs1(io.inst) === BitPat("b00?01") && GetRd(io.inst) === X0
io.out.target := Mux(cs_is_br, ComputeBranchTarget(io.pc, io.inst, xLen),
ComputeJALTarget(io.pc, io.inst, xLen))
io.out.cfi_type :=
Mux(cs_is_jalr,
CFI_JALR,
Mux(cs_is_jal,
CFI_JAL,
Mux(cs_is_br,
CFI_BR,
CFI_X)))
val br_offset = Cat(io.inst(7), io.inst(30,25), io.inst(11,8), 0.U(1.W))
// Is a sfb if it points forwards (offset is positive)
io.out.sfb_offset.valid := cs_is_br && !io.inst(31) && br_offset =/= 0.U && (br_offset >> log2Ceil(icBlockBytes)) === 0.U
io.out.sfb_offset.bits := br_offset
io.out.shadowable := cs_is_shadowable && (
!cs_has_rs2 ||
(GetRs1(io.inst) === GetRd(io.inst)) ||
(io.inst === ADD && GetRs1(io.inst) === X0)
)
}
/**
* Track the current "branch mask", and give out the branch mask to each micro-op in Decode
* (each micro-op in the machine has a branch mask which says which branches it
* is being speculated under).
*
* @param pl_width pipeline width for the processor
*/
class BranchMaskGenerationLogic(val pl_width: Int)(implicit p: Parameters) extends BoomModule
{
val io = IO(new Bundle {
// guess if the uop is a branch (we'll catch this later)
val is_branch = Input(Vec(pl_width, Bool()))
// lock in that it's actually a branch and will fire, so we update
// the branch_masks.
val will_fire = Input(Vec(pl_width, Bool()))
// give out tag immediately (needed in rename)
// mask can come later in the cycle
val br_tag = Output(Vec(pl_width, UInt(brTagSz.W)))
val br_mask = Output(Vec(pl_width, UInt(maxBrCount.W)))
// tell decoders the branch mask has filled up, but on the granularity
// of an individual micro-op (so some micro-ops can go through)
val is_full = Output(Vec(pl_width, Bool()))
val brupdate = Input(new BrUpdateInfo())
val flush_pipeline = Input(Bool())
val debug_branch_mask = Output(UInt(maxBrCount.W))
})
val branch_mask = RegInit(0.U(maxBrCount.W))
//-------------------------------------------------------------
// Give out the branch tag to each branch micro-op
var allocate_mask = branch_mask
val tag_masks = Wire(Vec(pl_width, UInt(maxBrCount.W)))
for (w <- 0 until pl_width) {
// TODO this is a loss of performance as we're blocking branches based on potentially fake branches
io.is_full(w) := (allocate_mask === ~(0.U(maxBrCount.W))) && io.is_branch(w)
// find br_tag and compute next br_mask
val new_br_tag = Wire(UInt(brTagSz.W))
new_br_tag := 0.U
tag_masks(w) := 0.U
for (i <- maxBrCount-1 to 0 by -1) {
when (~allocate_mask(i)) {
new_br_tag := i.U
tag_masks(w) := (1.U << i.U)
}
}
io.br_tag(w) := new_br_tag
allocate_mask = Mux(io.is_branch(w), tag_masks(w) | allocate_mask, allocate_mask)
}
//-------------------------------------------------------------
// Give out the branch mask to each micro-op
// (kill off the bits that corresponded to branches that aren't going to fire)
var curr_mask = branch_mask
for (w <- 0 until pl_width) {
io.br_mask(w) := GetNewBrMask(io.brupdate, curr_mask)
curr_mask = Mux(io.will_fire(w), tag_masks(w) | curr_mask, curr_mask)
}
//-------------------------------------------------------------
// Update the current branch_mask
when (io.flush_pipeline) {
branch_mask := 0.U
} .otherwise {
val mask = Mux(io.brupdate.b2.mispredict,
io.brupdate.b2.uop.br_mask,
~(0.U(maxBrCount.W)))
branch_mask := GetNewBrMask(io.brupdate, curr_mask) & mask
}
io.debug_branch_mask := branch_mask
} | module DecodeUnit(
input [31:0] io_enq_uop_inst,
input [31:0] io_enq_uop_debug_inst,
input io_enq_uop_is_rvc,
input [39:0] io_enq_uop_debug_pc,
input io_enq_uop_is_sfb,
input [3:0] io_enq_uop_ftq_idx,
input io_enq_uop_edge_inst,
input [5:0] io_enq_uop_pc_lob,
input io_enq_uop_taken,
input io_enq_uop_xcpt_pf_if,
input io_enq_uop_xcpt_ae_if,
input io_enq_uop_bp_debug_if,
input io_enq_uop_bp_xcpt_if,
input [1:0] io_enq_uop_debug_fsrc,
output [6:0] io_deq_uop_uopc,
output [31:0] io_deq_uop_inst,
output [31:0] io_deq_uop_debug_inst,
output io_deq_uop_is_rvc,
output [39:0] io_deq_uop_debug_pc,
output [2:0] io_deq_uop_iq_type,
output [9:0] io_deq_uop_fu_code,
output io_deq_uop_is_br,
output io_deq_uop_is_jalr,
output io_deq_uop_is_jal,
output io_deq_uop_is_sfb,
output [3:0] io_deq_uop_ftq_idx,
output io_deq_uop_edge_inst,
output [5:0] io_deq_uop_pc_lob,
output io_deq_uop_taken,
output [19:0] io_deq_uop_imm_packed,
output io_deq_uop_exception,
output [63:0] io_deq_uop_exc_cause,
output io_deq_uop_bypassable,
output [4:0] io_deq_uop_mem_cmd,
output [1:0] io_deq_uop_mem_size,
output io_deq_uop_mem_signed,
output io_deq_uop_is_fence,
output io_deq_uop_is_fencei,
output io_deq_uop_is_amo,
output io_deq_uop_uses_ldq,
output io_deq_uop_uses_stq,
output io_deq_uop_is_sys_pc2epc,
output io_deq_uop_is_unique,
output io_deq_uop_flush_on_commit,
output [5:0] io_deq_uop_ldst,
output [5:0] io_deq_uop_lrs1,
output [5:0] io_deq_uop_lrs2,
output [5:0] io_deq_uop_lrs3,
output io_deq_uop_ldst_val,
output [1:0] io_deq_uop_dst_rtype,
output [1:0] io_deq_uop_lrs1_rtype,
output [1:0] io_deq_uop_lrs2_rtype,
output io_deq_uop_frs3_en,
output io_deq_uop_fp_val,
output io_deq_uop_fp_single,
output io_deq_uop_xcpt_pf_if,
output io_deq_uop_xcpt_ae_if,
output io_deq_uop_bp_debug_if,
output io_deq_uop_bp_xcpt_if,
output [1:0] io_deq_uop_debug_fsrc,
output [31:0] io_csr_decode_inst,
input io_csr_decode_fp_illegal,
input io_csr_decode_read_illegal,
input io_csr_decode_write_illegal,
input io_csr_decode_write_flush,
input io_csr_decode_system_illegal,
input io_interrupt,
input [63:0] io_interrupt_cause
);
wire [4:0] _uop_lrs1_T;
wire [29:0] cs_decoder_decoded_invInputs = ~(io_enq_uop_inst[31:2]);
wire [6:0] _cs_decoder_decoded_andMatrixOutputs_T = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10]};
wire [7:0] _cs_decoder_decoded_andMatrixOutputs_T_1 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10]};
wire [7:0] _cs_decoder_decoded_andMatrixOutputs_T_2 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[11]};
wire [6:0] _cs_decoder_decoded_andMatrixOutputs_T_3 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[12]};
wire [8:0] _cs_decoder_decoded_andMatrixOutputs_T_5 = {io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], io_enq_uop_inst[3], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12]};
wire [9:0] _cs_decoder_decoded_andMatrixOutputs_T_6 = {io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], io_enq_uop_inst[3], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12]};
wire [7:0] _cs_decoder_decoded_andMatrixOutputs_T_7 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10]};
wire [8:0] _cs_decoder_decoded_andMatrixOutputs_T_8 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12]};
wire [5:0] _cs_decoder_decoded_andMatrixOutputs_T_10 = {io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[4]};
wire [6:0] _cs_decoder_decoded_andMatrixOutputs_T_11 = {io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4]};
wire [9:0] _cs_decoder_decoded_andMatrixOutputs_T_12 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[3], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12]};
wire [7:0] _cs_decoder_decoded_andMatrixOutputs_T_13 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[12]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_14 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[29]};
wire [12:0] _cs_decoder_decoded_andMatrixOutputs_T_15 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_16 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [13:0] _cs_decoder_decoded_andMatrixOutputs_T_17 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_19 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [6:0] _cs_decoder_decoded_andMatrixOutputs_T_21 = {io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4]};
wire [15:0] _cs_decoder_decoded_andMatrixOutputs_T_22 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[3], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[29]};
wire [4:0] _cs_decoder_decoded_andMatrixOutputs_T_24 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6]};
wire [5:0] _cs_decoder_decoded_andMatrixOutputs_T_25 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[24]};
wire [7:0] _cs_decoder_decoded_andMatrixOutputs_T_28 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24]};
wire [7:0] _cs_decoder_decoded_andMatrixOutputs_T_30 = {io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24]};
wire [10:0] _cs_decoder_decoded_andMatrixOutputs_T_33 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [11:0] _cs_decoder_decoded_andMatrixOutputs_T_35 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [11:0] _cs_decoder_decoded_andMatrixOutputs_T_37 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [7:0] _cs_decoder_decoded_andMatrixOutputs_T_40 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[11]};
wire [9:0] _cs_decoder_decoded_andMatrixOutputs_T_42 = {io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12]};
wire [6:0] _cs_decoder_decoded_andMatrixOutputs_T_43 = {io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], io_enq_uop_inst[3], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], io_enq_uop_inst[6]};
wire [27:0] _cs_decoder_decoded_andMatrixOutputs_T_44 = {io_enq_uop_inst[0], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[5], cs_decoder_decoded_invInputs[6], cs_decoder_decoded_invInputs[7], cs_decoder_decoded_invInputs[8], cs_decoder_decoded_invInputs[9], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[13], cs_decoder_decoded_invInputs[14], cs_decoder_decoded_invInputs[15], cs_decoder_decoded_invInputs[16], cs_decoder_decoded_invInputs[17], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [30:0] _cs_decoder_decoded_andMatrixOutputs_T_45 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[5], cs_decoder_decoded_invInputs[6], cs_decoder_decoded_invInputs[7], cs_decoder_decoded_invInputs[8], cs_decoder_decoded_invInputs[9], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[13], cs_decoder_decoded_invInputs[14], cs_decoder_decoded_invInputs[15], cs_decoder_decoded_invInputs[16], cs_decoder_decoded_invInputs[17], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [13:0] _cs_decoder_decoded_andMatrixOutputs_T_47 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_48 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_50 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[3], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [15:0] _cs_decoder_decoded_andMatrixOutputs_T_52 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[3], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [8:0] _cs_decoder_decoded_andMatrixOutputs_T_56 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[5], io_enq_uop_inst[6], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12]};
wire [7:0] _cs_decoder_decoded_andMatrixOutputs_T_58 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], io_enq_uop_inst[12]};
wire [9:0] _cs_decoder_decoded_andMatrixOutputs_T_59 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12]};
wire [6:0] _cs_decoder_decoded_andMatrixOutputs_T_61 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12]};
wire [7:0] _cs_decoder_decoded_andMatrixOutputs_T_62 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12]};
wire [7:0] _cs_decoder_decoded_andMatrixOutputs_T_66 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13]};
wire [7:0] _cs_decoder_decoded_andMatrixOutputs_T_68 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12]};
wire [8:0] _cs_decoder_decoded_andMatrixOutputs_T_69 = {io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12]};
wire [10:0] _cs_decoder_decoded_andMatrixOutputs_T_70 = {io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], io_enq_uop_inst[3], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26]};
wire [7:0] _cs_decoder_decoded_andMatrixOutputs_T_73 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], io_enq_uop_inst[13]};
wire [9:0] _cs_decoder_decoded_andMatrixOutputs_T_75 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[10], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12]};
wire [7:0] _cs_decoder_decoded_andMatrixOutputs_T_81 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], io_enq_uop_inst[6], io_enq_uop_inst[14]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_83 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], io_enq_uop_inst[14], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[29]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_84 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[3], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], io_enq_uop_inst[14], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[29]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_86 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], io_enq_uop_inst[14], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[29]};
wire [15:0] _cs_decoder_decoded_andMatrixOutputs_T_87 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], io_enq_uop_inst[14], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[29]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_88 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], io_enq_uop_inst[14], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [8:0] _cs_decoder_decoded_andMatrixOutputs_T_92 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], io_enq_uop_inst[12], io_enq_uop_inst[14]};
wire [8:0] _cs_decoder_decoded_andMatrixOutputs_T_93 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], io_enq_uop_inst[14]};
wire [8:0] _cs_decoder_decoded_andMatrixOutputs_T_95 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], io_enq_uop_inst[13], io_enq_uop_inst[14]};
wire [13:0] _cs_decoder_decoded_andMatrixOutputs_T_96 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_97 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_98 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[12], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [13:0] _cs_decoder_decoded_andMatrixOutputs_T_106 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[14], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_108 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], io_enq_uop_inst[14], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_109 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[11], io_enq_uop_inst[14], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [12:0] _cs_decoder_decoded_andMatrixOutputs_T_115 = {io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], io_enq_uop_inst[3], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12], io_enq_uop_inst[27], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
// ---------------------------------------------------------------------------
// Decoder AND-plane rows (machine-generated from Chisel DecodeLogic; do not
// hand-edit — regenerate from the Chisel source instead).
//
// Each _cs_decoder_decoded_andMatrixOutputs_T_* vector concatenates selected
// instruction bits (io_enq_uop_inst[k]) and complemented bits
// (cs_decoder_decoded_invInputs[k]) that together encode one opcode match
// pattern. A row "fires" when its reduction-AND (&row, taken at the use
// sites below) is 1, i.e. when the enqueued instruction matches the pattern.
// NOTE(review): the invInputs[k] index is offset from the raw instruction
// bit index (inverted-input plane of the PLA); the mapping is defined where
// cs_decoder_decoded_invInputs is assigned, outside this chunk — confirm
// there before reasoning about individual bit positions.
// ---------------------------------------------------------------------------
wire [16:0] _cs_decoder_decoded_andMatrixOutputs_T_118 = {io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], io_enq_uop_inst[3], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[18], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [18:0] _cs_decoder_decoded_andMatrixOutputs_T_121 = {io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], io_enq_uop_inst[3], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[18], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [27:0] _cs_decoder_decoded_andMatrixOutputs_T_122 = {io_enq_uop_inst[0], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[5], cs_decoder_decoded_invInputs[6], cs_decoder_decoded_invInputs[7], cs_decoder_decoded_invInputs[8], cs_decoder_decoded_invInputs[9], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[13], cs_decoder_decoded_invInputs[14], cs_decoder_decoded_invInputs[15], cs_decoder_decoded_invInputs[16], cs_decoder_decoded_invInputs[17], cs_decoder_decoded_invInputs[18], io_enq_uop_inst[21], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [30:0] _cs_decoder_decoded_andMatrixOutputs_T_123 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[5], cs_decoder_decoded_invInputs[6], cs_decoder_decoded_invInputs[7], cs_decoder_decoded_invInputs[8], cs_decoder_decoded_invInputs[9], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[13], cs_decoder_decoded_invInputs[14], cs_decoder_decoded_invInputs[15], cs_decoder_decoded_invInputs[16], cs_decoder_decoded_invInputs[17], cs_decoder_decoded_invInputs[18], io_enq_uop_inst[21], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [27:0] _cs_decoder_decoded_andMatrixOutputs_T_124 = {io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[5], cs_decoder_decoded_invInputs[6], cs_decoder_decoded_invInputs[7], cs_decoder_decoded_invInputs[8], cs_decoder_decoded_invInputs[9], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[13], cs_decoder_decoded_invInputs[14], cs_decoder_decoded_invInputs[15], cs_decoder_decoded_invInputs[16], cs_decoder_decoded_invInputs[17], io_enq_uop_inst[20], cs_decoder_decoded_invInputs[19], io_enq_uop_inst[22], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [31:0] _cs_decoder_decoded_andMatrixOutputs_T_125 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[5], cs_decoder_decoded_invInputs[6], cs_decoder_decoded_invInputs[7], cs_decoder_decoded_invInputs[8], cs_decoder_decoded_invInputs[9], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[13], cs_decoder_decoded_invInputs[14], cs_decoder_decoded_invInputs[15], cs_decoder_decoded_invInputs[16], cs_decoder_decoded_invInputs[17], io_enq_uop_inst[20], cs_decoder_decoded_invInputs[19], io_enq_uop_inst[22], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [16:0] _cs_decoder_decoded_andMatrixOutputs_T_127 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[5], cs_decoder_decoded_invInputs[6], cs_decoder_decoded_invInputs[7], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [18:0] _cs_decoder_decoded_andMatrixOutputs_T_128 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[5], cs_decoder_decoded_invInputs[6], cs_decoder_decoded_invInputs[7], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [19:0] _cs_decoder_decoded_andMatrixOutputs_T_129 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[5], cs_decoder_decoded_invInputs[6], cs_decoder_decoded_invInputs[7], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [21:0] _cs_decoder_decoded_andMatrixOutputs_T_130 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[5], cs_decoder_decoded_invInputs[6], cs_decoder_decoded_invInputs[7], cs_decoder_decoded_invInputs[8], cs_decoder_decoded_invInputs[9], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [12:0] _cs_decoder_decoded_andMatrixOutputs_T_131 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[24], io_enq_uop_inst[27], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [13:0] _cs_decoder_decoded_andMatrixOutputs_T_132 = {io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], io_enq_uop_inst[3], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12], io_enq_uop_inst[27], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [13:0] _cs_decoder_decoded_andMatrixOutputs_T_133 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], cs_decoder_decoded_invInputs[28]};
wire [13:0] _cs_decoder_decoded_andMatrixOutputs_T_134 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], cs_decoder_decoded_invInputs[28]};
wire [13:0] _cs_decoder_decoded_andMatrixOutputs_T_137 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_139 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_145 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[24], io_enq_uop_inst[27], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]};
wire [17:0] _cs_decoder_decoded_andMatrixOutputs_T_149 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], io_enq_uop_inst[20], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], io_enq_uop_inst[30]};
wire [18:0] _cs_decoder_decoded_andMatrixOutputs_T_150 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], io_enq_uop_inst[20], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], io_enq_uop_inst[30], cs_decoder_decoded_invInputs[29]};
wire [17:0] _cs_decoder_decoded_andMatrixOutputs_T_151 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[18], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], io_enq_uop_inst[30]};
wire [18:0] _cs_decoder_decoded_andMatrixOutputs_T_152 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[18], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], io_enq_uop_inst[30], cs_decoder_decoded_invInputs[29]};
wire [17:0] _cs_decoder_decoded_andMatrixOutputs_T_153 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[18], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[24], io_enq_uop_inst[27], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], io_enq_uop_inst[30], cs_decoder_decoded_invInputs[29]};
wire [18:0] _cs_decoder_decoded_andMatrixOutputs_T_154 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[18], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], io_enq_uop_inst[27], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], io_enq_uop_inst[30], cs_decoder_decoded_invInputs[29]};
wire [27:0] _cs_decoder_decoded_andMatrixOutputs_T_156 = {io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[5], cs_decoder_decoded_invInputs[6], cs_decoder_decoded_invInputs[7], cs_decoder_decoded_invInputs[8], cs_decoder_decoded_invInputs[9], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[13], cs_decoder_decoded_invInputs[14], cs_decoder_decoded_invInputs[15], cs_decoder_decoded_invInputs[16], cs_decoder_decoded_invInputs[17], cs_decoder_decoded_invInputs[18], io_enq_uop_inst[21], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], io_enq_uop_inst[24], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], io_enq_uop_inst[27], io_enq_uop_inst[28], io_enq_uop_inst[29], io_enq_uop_inst[30], cs_decoder_decoded_invInputs[29]};
wire [31:0] _cs_decoder_decoded_andMatrixOutputs_T_157 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[5], cs_decoder_decoded_invInputs[6], cs_decoder_decoded_invInputs[7], cs_decoder_decoded_invInputs[8], cs_decoder_decoded_invInputs[9], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[13], cs_decoder_decoded_invInputs[14], cs_decoder_decoded_invInputs[15], cs_decoder_decoded_invInputs[16], cs_decoder_decoded_invInputs[17], cs_decoder_decoded_invInputs[18], io_enq_uop_inst[21], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], io_enq_uop_inst[24], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], io_enq_uop_inst[27], io_enq_uop_inst[28], io_enq_uop_inst[29], io_enq_uop_inst[30], cs_decoder_decoded_invInputs[29]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_159 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], cs_decoder_decoded_invInputs[28], io_enq_uop_inst[31]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_160 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], cs_decoder_decoded_invInputs[28], io_enq_uop_inst[31]};
wire [15:0] _cs_decoder_decoded_andMatrixOutputs_T_161 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[18], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[30], io_enq_uop_inst[31]};
wire [14:0] _cs_decoder_decoded_andMatrixOutputs_T_162 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[27], io_enq_uop_inst[30], io_enq_uop_inst[31]};
wire [15:0] _cs_decoder_decoded_andMatrixOutputs_T_164 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], io_enq_uop_inst[30], io_enq_uop_inst[31]};
wire [15:0] _cs_decoder_decoded_andMatrixOutputs_T_166 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], io_enq_uop_inst[30], io_enq_uop_inst[31]};
wire [19:0] _cs_decoder_decoded_andMatrixOutputs_T_167 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[18], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[29], io_enq_uop_inst[30], io_enq_uop_inst[31]};
wire [19:0] _cs_decoder_decoded_andMatrixOutputs_T_169 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[18], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], io_enq_uop_inst[30], io_enq_uop_inst[31]};
wire [20:0] _cs_decoder_decoded_andMatrixOutputs_T_174 = {io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[18], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[28], io_enq_uop_inst[29], io_enq_uop_inst[30], io_enq_uop_inst[31]};
// ---------------------------------------------------------------------------
// Decoder OR-plane outputs (machine-generated; do not hand-edit).
//
// Each _cs_decoder_decoded_orMatrixOutputs_T_* collects the reduction-AND
// (&row) of a set of AND-plane rows — some named _T_* vectors declared
// above, some inlined anonymously here. The OR over this vector (taken at
// the use sites) yields one decoded control-signal bit: the bit is 1 when
// ANY of the listed opcode patterns matches the enqueued instruction.
// NOTE(review): which control signal each _T_* feeds is determined where it
// is consumed (e.g. the cs_* assignments below this chunk) — confirm there.
// ---------------------------------------------------------------------------
wire [36:0] _cs_decoder_decoded_orMatrixOutputs_T_76 =
    {&_cs_decoder_decoded_andMatrixOutputs_T_2,
     &_cs_decoder_decoded_andMatrixOutputs_T_8,
     &_cs_decoder_decoded_andMatrixOutputs_T_11,
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[24]},
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], io_enq_uop_inst[3], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[24]},
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
     &_cs_decoder_decoded_andMatrixOutputs_T_43,
     &_cs_decoder_decoded_andMatrixOutputs_T_48,
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[3], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], io_enq_uop_inst[6], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12]},
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], io_enq_uop_inst[13]},
     &_cs_decoder_decoded_andMatrixOutputs_T_62,
     &_cs_decoder_decoded_andMatrixOutputs_T_70,
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
     &_cs_decoder_decoded_andMatrixOutputs_T_75,
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], io_enq_uop_inst[14], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[3], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], io_enq_uop_inst[14], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[29]},
     &_cs_decoder_decoded_andMatrixOutputs_T_92,
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], io_enq_uop_inst[6], io_enq_uop_inst[13], io_enq_uop_inst[14]},
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
     &_cs_decoder_decoded_andMatrixOutputs_T_108,
     &_cs_decoder_decoded_andMatrixOutputs_T_115,
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], io_enq_uop_inst[27], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
     &_cs_decoder_decoded_andMatrixOutputs_T_118,
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
     &_cs_decoder_decoded_andMatrixOutputs_T_124,
     &_cs_decoder_decoded_andMatrixOutputs_T_129,
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[12], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], cs_decoder_decoded_invInputs[28]},
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], cs_decoder_decoded_invInputs[28]},
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[3], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], io_enq_uop_inst[30], cs_decoder_decoded_invInputs[29]},
     &_cs_decoder_decoded_andMatrixOutputs_T_151,
     &_cs_decoder_decoded_andMatrixOutputs_T_154,
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[27], io_enq_uop_inst[30], io_enq_uop_inst[31]},
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[18], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[29], io_enq_uop_inst[30], io_enq_uop_inst[31]},
     &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[18], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], io_enq_uop_inst[30], io_enq_uop_inst[31]}};
wire [11:0] _cs_decoder_decoded_orMatrixOutputs_T_92 = {&_cs_decoder_decoded_andMatrixOutputs_T_25, &_cs_decoder_decoded_andMatrixOutputs_T_33, &{io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12]}, &_cs_decoder_decoded_andMatrixOutputs_T_133, &_cs_decoder_decoded_andMatrixOutputs_T_134, &_cs_decoder_decoded_andMatrixOutputs_T_137, &_cs_decoder_decoded_andMatrixOutputs_T_149, &_cs_decoder_decoded_andMatrixOutputs_T_151, &_cs_decoder_decoded_andMatrixOutputs_T_153, &_cs_decoder_decoded_andMatrixOutputs_T_162, &_cs_decoder_decoded_andMatrixOutputs_T_167, &_cs_decoder_decoded_andMatrixOutputs_T_169};
wire [6:0] cs_uopc =
{|{&_cs_decoder_decoded_andMatrixOutputs_T_24, &_cs_decoder_decoded_andMatrixOutputs_T_33, &_cs_decoder_decoded_andMatrixOutputs_T_44, &_cs_decoder_decoded_andMatrixOutputs_T_70, &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[3], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], io_enq_uop_inst[13], io_enq_uop_inst[14], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]}, &_cs_decoder_decoded_andMatrixOutputs_T_115, &_cs_decoder_decoded_andMatrixOutputs_T_122, &_cs_decoder_decoded_andMatrixOutputs_T_124, &_cs_decoder_decoded_andMatrixOutputs_T_129, &_cs_decoder_decoded_andMatrixOutputs_T_133, &_cs_decoder_decoded_andMatrixOutputs_T_134, &_cs_decoder_decoded_andMatrixOutputs_T_137, &_cs_decoder_decoded_andMatrixOutputs_T_149, &_cs_decoder_decoded_andMatrixOutputs_T_151, &_cs_decoder_decoded_andMatrixOutputs_T_153, &_cs_decoder_decoded_andMatrixOutputs_T_156, &_cs_decoder_decoded_andMatrixOutputs_T_161, &_cs_decoder_decoded_andMatrixOutputs_T_162},
|{&_cs_decoder_decoded_andMatrixOutputs_T_6, &_cs_decoder_decoded_andMatrixOutputs_T_11, &_cs_decoder_decoded_andMatrixOutputs_T_12, &_cs_decoder_decoded_andMatrixOutputs_T_22, &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[3], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[24]}, &_cs_decoder_decoded_andMatrixOutputs_T_42, &_cs_decoder_decoded_andMatrixOutputs_T_43, &_cs_decoder_decoded_andMatrixOutputs_T_44, &_cs_decoder_decoded_andMatrixOutputs_T_50, &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], io_enq_uop_inst[12], io_enq_uop_inst[13]}, &_cs_decoder_decoded_andMatrixOutputs_T_84, &_cs_decoder_decoded_andMatrixOutputs_T_92, &_cs_decoder_decoded_andMatrixOutputs_T_95, &_cs_decoder_decoded_andMatrixOutputs_T_96, &_cs_decoder_decoded_andMatrixOutputs_T_97, &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24]}, &_cs_decoder_decoded_andMatrixOutputs_T_108, &_cs_decoder_decoded_andMatrixOutputs_T_109, &_cs_decoder_decoded_andMatrixOutputs_T_122, &_cs_decoder_decoded_andMatrixOutputs_T_124, &_cs_decoder_decoded_andMatrixOutputs_T_129, &_cs_decoder_decoded_andMatrixOutputs_T_131, &_cs_decoder_decoded_andMatrixOutputs_T_153, &_cs_decoder_decoded_andMatrixOutputs_T_156},
|{&_cs_decoder_decoded_andMatrixOutputs_T_28,
&_cs_decoder_decoded_andMatrixOutputs_T_30,
&_cs_decoder_decoded_andMatrixOutputs_T_35,
&_cs_decoder_decoded_andMatrixOutputs_T_37,
&_cs_decoder_decoded_andMatrixOutputs_T_40,
&_cs_decoder_decoded_andMatrixOutputs_T_56,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&_cs_decoder_decoded_andMatrixOutputs_T_75,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[14], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&_cs_decoder_decoded_andMatrixOutputs_T_81,
&_cs_decoder_decoded_andMatrixOutputs_T_84,
&_cs_decoder_decoded_andMatrixOutputs_T_86,
&_cs_decoder_decoded_andMatrixOutputs_T_88,
&_cs_decoder_decoded_andMatrixOutputs_T_96,
&_cs_decoder_decoded_andMatrixOutputs_T_97,
&_cs_decoder_decoded_andMatrixOutputs_T_108,
&_cs_decoder_decoded_andMatrixOutputs_T_145,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], io_enq_uop_inst[30], cs_decoder_decoded_invInputs[29]},
&_cs_decoder_decoded_andMatrixOutputs_T_159,
&_cs_decoder_decoded_andMatrixOutputs_T_160,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[18], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], io_enq_uop_inst[30], io_enq_uop_inst[31]}},
|{&_cs_decoder_decoded_andMatrixOutputs_T_6,
&_cs_decoder_decoded_andMatrixOutputs_T_12,
&_cs_decoder_decoded_andMatrixOutputs_T_19,
&_cs_decoder_decoded_andMatrixOutputs_T_22,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[3], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&_cs_decoder_decoded_andMatrixOutputs_T_28,
&_cs_decoder_decoded_andMatrixOutputs_T_30,
&_cs_decoder_decoded_andMatrixOutputs_T_40,
&_cs_decoder_decoded_andMatrixOutputs_T_44,
&_cs_decoder_decoded_andMatrixOutputs_T_48,
&_cs_decoder_decoded_andMatrixOutputs_T_52,
&_cs_decoder_decoded_andMatrixOutputs_T_56,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12]},
&_cs_decoder_decoded_andMatrixOutputs_T_75,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], io_enq_uop_inst[14]},
&_cs_decoder_decoded_andMatrixOutputs_T_81,
&_cs_decoder_decoded_andMatrixOutputs_T_83,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[14], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&_cs_decoder_decoded_andMatrixOutputs_T_108,
&_cs_decoder_decoded_andMatrixOutputs_T_109,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[24], io_enq_uop_inst[27], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&_cs_decoder_decoded_andMatrixOutputs_T_122,
&_cs_decoder_decoded_andMatrixOutputs_T_124,
&_cs_decoder_decoded_andMatrixOutputs_T_128,
&_cs_decoder_decoded_andMatrixOutputs_T_139,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&_cs_decoder_decoded_andMatrixOutputs_T_149,
&_cs_decoder_decoded_andMatrixOutputs_T_151,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[18], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], io_enq_uop_inst[27], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], io_enq_uop_inst[30], cs_decoder_decoded_invInputs[29]},
&_cs_decoder_decoded_andMatrixOutputs_T_156,
&_cs_decoder_decoded_andMatrixOutputs_T_162},
|{&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12]},
&_cs_decoder_decoded_andMatrixOutputs_T_10,
&_cs_decoder_decoded_andMatrixOutputs_T_19,
&_cs_decoder_decoded_andMatrixOutputs_T_22,
&_cs_decoder_decoded_andMatrixOutputs_T_28,
&_cs_decoder_decoded_andMatrixOutputs_T_30,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&_cs_decoder_decoded_andMatrixOutputs_T_42,
&_cs_decoder_decoded_andMatrixOutputs_T_43,
&_cs_decoder_decoded_andMatrixOutputs_T_52,
&_cs_decoder_decoded_andMatrixOutputs_T_59,
&_cs_decoder_decoded_andMatrixOutputs_T_75,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], io_enq_uop_inst[14], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[10], io_enq_uop_inst[14]},
&_cs_decoder_decoded_andMatrixOutputs_T_83,
&_cs_decoder_decoded_andMatrixOutputs_T_87,
&_cs_decoder_decoded_andMatrixOutputs_T_93,
&_cs_decoder_decoded_andMatrixOutputs_T_98,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[3], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], io_enq_uop_inst[13], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[3], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], io_enq_uop_inst[14], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[3], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[11], io_enq_uop_inst[14], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], io_enq_uop_inst[28], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&_cs_decoder_decoded_andMatrixOutputs_T_131,
&_cs_decoder_decoded_andMatrixOutputs_T_145,
&_cs_decoder_decoded_andMatrixOutputs_T_154,
&_cs_decoder_decoded_andMatrixOutputs_T_162,
&_cs_decoder_decoded_andMatrixOutputs_T_167},
|{&_cs_decoder_decoded_andMatrixOutputs_T_6,
&_cs_decoder_decoded_andMatrixOutputs_T_11,
&_cs_decoder_decoded_andMatrixOutputs_T_12,
&_cs_decoder_decoded_andMatrixOutputs_T_13,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[24]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&_cs_decoder_decoded_andMatrixOutputs_T_42,
&_cs_decoder_decoded_andMatrixOutputs_T_44,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&_cs_decoder_decoded_andMatrixOutputs_T_50,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&_cs_decoder_decoded_andMatrixOutputs_T_59,
&_cs_decoder_decoded_andMatrixOutputs_T_68,
&_cs_decoder_decoded_andMatrixOutputs_T_70,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[10], io_enq_uop_inst[13]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], io_enq_uop_inst[13]},
&_cs_decoder_decoded_andMatrixOutputs_T_87,
&_cs_decoder_decoded_andMatrixOutputs_T_88,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], io_enq_uop_inst[6], io_enq_uop_inst[12], io_enq_uop_inst[14]},
&_cs_decoder_decoded_andMatrixOutputs_T_93,
&_cs_decoder_decoded_andMatrixOutputs_T_95,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], io_enq_uop_inst[13], io_enq_uop_inst[14], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&_cs_decoder_decoded_andMatrixOutputs_T_115,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], io_enq_uop_inst[27], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&_cs_decoder_decoded_andMatrixOutputs_T_122,
&_cs_decoder_decoded_andMatrixOutputs_T_128,
&_cs_decoder_decoded_andMatrixOutputs_T_149,
&_cs_decoder_decoded_andMatrixOutputs_T_151,
&_cs_decoder_decoded_andMatrixOutputs_T_154,
&_cs_decoder_decoded_andMatrixOutputs_T_156,
&_cs_decoder_decoded_andMatrixOutputs_T_164,
&_cs_decoder_decoded_andMatrixOutputs_T_169},
|_cs_decoder_decoded_orMatrixOutputs_T_76};
// ---- Decoded control signals ----
// Each `_cs_decoder_decoded_andMatrixOutputs_T_*` is one product term of the
// generated decoder's AND plane; the reductions below form its OR plane.
// Destination register type (2 bits). Bit 1 is set when NONE of the 33 listed
// product terms fire (NOR via `== 33'h0`); bit 0 is a plain OR of its terms.
// Presumably the usual BOOM RT_* encoding (0 = int reg, 1 = FP reg,
// 2 = no destination) -- confirm against the Chisel decode table.
wire [1:0] cs_dst_type =
{{&_cs_decoder_decoded_andMatrixOutputs_T, &_cs_decoder_decoded_andMatrixOutputs_T_2, &_cs_decoder_decoded_andMatrixOutputs_T_8, &_cs_decoder_decoded_andMatrixOutputs_T_10, &_cs_decoder_decoded_andMatrixOutputs_T_14, &_cs_decoder_decoded_andMatrixOutputs_T_15, &_cs_decoder_decoded_andMatrixOutputs_T_16, &_cs_decoder_decoded_andMatrixOutputs_T_25, &_cs_decoder_decoded_andMatrixOutputs_T_33, &_cs_decoder_decoded_andMatrixOutputs_T_42, &_cs_decoder_decoded_andMatrixOutputs_T_43, &_cs_decoder_decoded_andMatrixOutputs_T_47, &_cs_decoder_decoded_andMatrixOutputs_T_50, &_cs_decoder_decoded_andMatrixOutputs_T_58, &_cs_decoder_decoded_andMatrixOutputs_T_62, &_cs_decoder_decoded_andMatrixOutputs_T_66, &_cs_decoder_decoded_andMatrixOutputs_T_70, &_cs_decoder_decoded_andMatrixOutputs_T_73, &_cs_decoder_decoded_andMatrixOutputs_T_83, &_cs_decoder_decoded_andMatrixOutputs_T_84, &_cs_decoder_decoded_andMatrixOutputs_T_86, &_cs_decoder_decoded_andMatrixOutputs_T_106, &_cs_decoder_decoded_andMatrixOutputs_T_115, &_cs_decoder_decoded_andMatrixOutputs_T_121, &_cs_decoder_decoded_andMatrixOutputs_T_133, &_cs_decoder_decoded_andMatrixOutputs_T_134, &_cs_decoder_decoded_andMatrixOutputs_T_137, &_cs_decoder_decoded_andMatrixOutputs_T_149, &_cs_decoder_decoded_andMatrixOutputs_T_151, &_cs_decoder_decoded_andMatrixOutputs_T_153, &_cs_decoder_decoded_andMatrixOutputs_T_162, &_cs_decoder_decoded_andMatrixOutputs_T_167, &_cs_decoder_decoded_andMatrixOutputs_T_169} == 33'h0,
|{&_cs_decoder_decoded_andMatrixOutputs_T_25, &_cs_decoder_decoded_andMatrixOutputs_T_33, &{io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12]}, &_cs_decoder_decoded_andMatrixOutputs_T_137, &_cs_decoder_decoded_andMatrixOutputs_T_139, &_cs_decoder_decoded_andMatrixOutputs_T_150, &_cs_decoder_decoded_andMatrixOutputs_T_152, &_cs_decoder_decoded_andMatrixOutputs_T_153, &_cs_decoder_decoded_andMatrixOutputs_T_166, &_cs_decoder_decoded_andMatrixOutputs_T_174}};
// Immediate-format selector (3 bits). The exact value-to-format mapping
// (I/S/B/U/J style) is defined by the Chisel decode constants, not visible
// here; only used below to steer io_deq_uop_imm_packed.
wire [2:0] cs_imm_sel = {&_cs_decoder_decoded_andMatrixOutputs_T_43, |{&_cs_decoder_decoded_andMatrixOutputs_T_10, &_cs_decoder_decoded_andMatrixOutputs_T_40, &_cs_decoder_decoded_andMatrixOutputs_T_81}, |{&_cs_decoder_decoded_andMatrixOutputs_T_10, &_cs_decoder_decoded_andMatrixOutputs_T_13, &_cs_decoder_decoded_andMatrixOutputs_T_68}};
// Memory command (5 bits), driven out on io_deq_uop_mem_cmd; each bit is an
// OR over load/store/AMO-shaped product terms (note the inline terms matching
// AMO opcode bits inst[31:25]).
wire [4:0] cs_mem_cmd = {&_cs_decoder_decoded_andMatrixOutputs_T_127, &_cs_decoder_decoded_andMatrixOutputs_T_70, |{&_cs_decoder_decoded_andMatrixOutputs_T_115, &_cs_decoder_decoded_andMatrixOutputs_T_118, &_cs_decoder_decoded_andMatrixOutputs_T_127, &{io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], io_enq_uop_inst[3], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[31]}}, |{&_cs_decoder_decoded_andMatrixOutputs_T_118, &_cs_decoder_decoded_andMatrixOutputs_T_132, &{io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], io_enq_uop_inst[3], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[30]}}, |{&_cs_decoder_decoded_andMatrixOutputs_T_13, &_cs_decoder_decoded_andMatrixOutputs_T_68, &_cs_decoder_decoded_andMatrixOutputs_T_132, &{io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], io_enq_uop_inst[3], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29]}}};
// CSR command (3 bits); consumed by the csr_en / csr_ren helpers below.
wire [2:0] cs_csr_cmd = {|{&_cs_decoder_decoded_andMatrixOutputs_T_44, &_cs_decoder_decoded_andMatrixOutputs_T_58, &_cs_decoder_decoded_andMatrixOutputs_T_73, &_cs_decoder_decoded_andMatrixOutputs_T_122, &_cs_decoder_decoded_andMatrixOutputs_T_124, &_cs_decoder_decoded_andMatrixOutputs_T_156}, &_cs_decoder_decoded_andMatrixOutputs_T_73, &_cs_decoder_decoded_andMatrixOutputs_T_58};
// CSR command == 6 (one of the read-modify CSR commands; exact enum value
// comes from the Chisel CSR command constants -- confirm).
wire _csr_ren_T = cs_csr_cmd == 3'h6;
// Instruction performs a CSR access at all: command is 5, 6 or 7
// (`&cs_csr_cmd` is true exactly when the command is 3'h7).
wire csr_en = _csr_ren_T | (&cs_csr_cmd) | cs_csr_cmd == 3'h5;
// Pure CSR read: command 6/7 with the rs1 field (inst[19:15]) equal to x0.
// Used below to suppress the write-illegal check for read-only accesses.
wire csr_ren = (_csr_ren_T | (&cs_csr_cmd)) & _uop_lrs1_T == 5'h0;
// Take the pending external interrupt on this uop, unless the uop is marked
// as a short-forward-branch (sfb) uop by the front end.
wire _GEN = io_interrupt & ~io_enq_uop_is_sfb;
// rs1 specifier field of the raw instruction (shared by csr_ren, mem_size
// and io_deq_uop_lrs1 below).
assign _uop_lrs1_T = io_enq_uop_inst[19:15];
// Decoded micro-op code (cs_uopc is produced earlier in this module).
assign io_deq_uop_uopc = cs_uopc;
// Straight pass-through of the fetched instruction and front-end metadata.
assign io_deq_uop_inst = io_enq_uop_inst;
assign io_deq_uop_debug_inst = io_enq_uop_debug_inst;
assign io_deq_uop_is_rvc = io_enq_uop_is_rvc;
assign io_deq_uop_debug_pc = io_enq_uop_debug_pc;
// Issue-queue type (3 bits): per-bit OR/NOR reductions over product terms
// selecting which issue-queue class this uop dispatches to (bit 2's term list
// reduces to `== 18'h0`, i.e. "none of the FP/mem terms fired"). Presumably
// the IQT_* one-hot classes from BOOM -- confirm against the decode table.
assign io_deq_uop_iq_type = {|{&_cs_decoder_decoded_andMatrixOutputs_T_25, &_cs_decoder_decoded_andMatrixOutputs_T_33, &_cs_decoder_decoded_andMatrixOutputs_T_69, &_cs_decoder_decoded_andMatrixOutputs_T_133, &_cs_decoder_decoded_andMatrixOutputs_T_134, &_cs_decoder_decoded_andMatrixOutputs_T_137, &_cs_decoder_decoded_andMatrixOutputs_T_149, &_cs_decoder_decoded_andMatrixOutputs_T_151, &_cs_decoder_decoded_andMatrixOutputs_T_153, &_cs_decoder_decoded_andMatrixOutputs_T_164, &_cs_decoder_decoded_andMatrixOutputs_T_169}, |{&_cs_decoder_decoded_andMatrixOutputs_T_1, &_cs_decoder_decoded_andMatrixOutputs_T_2, &_cs_decoder_decoded_andMatrixOutputs_T_3, &_cs_decoder_decoded_andMatrixOutputs_T_61, &_cs_decoder_decoded_andMatrixOutputs_T_70, &_cs_decoder_decoded_andMatrixOutputs_T_115, &_cs_decoder_decoded_andMatrixOutputs_T_121, &_cs_decoder_decoded_andMatrixOutputs_T_130}, {&_cs_decoder_decoded_andMatrixOutputs_T_1, &_cs_decoder_decoded_andMatrixOutputs_T_2, &_cs_decoder_decoded_andMatrixOutputs_T_3, &_cs_decoder_decoded_andMatrixOutputs_T_25, &_cs_decoder_decoded_andMatrixOutputs_T_33, &_cs_decoder_decoded_andMatrixOutputs_T_61, &_cs_decoder_decoded_andMatrixOutputs_T_70, &_cs_decoder_decoded_andMatrixOutputs_T_115, &_cs_decoder_decoded_andMatrixOutputs_T_121, &_cs_decoder_decoded_andMatrixOutputs_T_130, &_cs_decoder_decoded_andMatrixOutputs_T_133, &_cs_decoder_decoded_andMatrixOutputs_T_134, &_cs_decoder_decoded_andMatrixOutputs_T_137, &_cs_decoder_decoded_andMatrixOutputs_T_149, &_cs_decoder_decoded_andMatrixOutputs_T_151, &_cs_decoder_decoded_andMatrixOutputs_T_153, &_cs_decoder_decoded_andMatrixOutputs_T_164, &_cs_decoder_decoded_andMatrixOutputs_T_169} == 18'h0};
// Functional-unit code: a 10-bit mask, one OR-reduction of product terms per
// FU bit (FU_* encoding from the Chisel decode constants; which bit maps to
// which unit is not recoverable from the generated code -- confirm upstream).
assign io_deq_uop_fu_code =
{|{&_cs_decoder_decoded_andMatrixOutputs_T_69, &_cs_decoder_decoded_andMatrixOutputs_T_159, &_cs_decoder_decoded_andMatrixOutputs_T_160, &_cs_decoder_decoded_andMatrixOutputs_T_164, &_cs_decoder_decoded_andMatrixOutputs_T_169},
|{&_cs_decoder_decoded_andMatrixOutputs_T_166, &_cs_decoder_decoded_andMatrixOutputs_T_174},
|{&_cs_decoder_decoded_andMatrixOutputs_T_131, &_cs_decoder_decoded_andMatrixOutputs_T_153},
|{&_cs_decoder_decoded_andMatrixOutputs_T_24, &_cs_decoder_decoded_andMatrixOutputs_T_35, &_cs_decoder_decoded_andMatrixOutputs_T_37, &_cs_decoder_decoded_andMatrixOutputs_T_137, &_cs_decoder_decoded_andMatrixOutputs_T_139, &_cs_decoder_decoded_andMatrixOutputs_T_150, &_cs_decoder_decoded_andMatrixOutputs_T_152},
// This bit's term list matches cs_csr_cmd's enable terms above (CSR unit).
|{&_cs_decoder_decoded_andMatrixOutputs_T_44, &_cs_decoder_decoded_andMatrixOutputs_T_58, &_cs_decoder_decoded_andMatrixOutputs_T_73, &_cs_decoder_decoded_andMatrixOutputs_T_122, &_cs_decoder_decoded_andMatrixOutputs_T_124, &_cs_decoder_decoded_andMatrixOutputs_T_156},
&_cs_decoder_decoded_andMatrixOutputs_T_106,
|{&_cs_decoder_decoded_andMatrixOutputs_T_98, &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[4], io_enq_uop_inst[5], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], io_enq_uop_inst[25], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]}},
|{&_cs_decoder_decoded_andMatrixOutputs_T_1, &_cs_decoder_decoded_andMatrixOutputs_T_2, &_cs_decoder_decoded_andMatrixOutputs_T_3, &_cs_decoder_decoded_andMatrixOutputs_T_6, &_cs_decoder_decoded_andMatrixOutputs_T_61, &_cs_decoder_decoded_andMatrixOutputs_T_70, &_cs_decoder_decoded_andMatrixOutputs_T_115, &_cs_decoder_decoded_andMatrixOutputs_T_118, &_cs_decoder_decoded_andMatrixOutputs_T_129},
|{&_cs_decoder_decoded_andMatrixOutputs_T_11, &_cs_decoder_decoded_andMatrixOutputs_T_42, &_cs_decoder_decoded_andMatrixOutputs_T_43},
// Lowest bit's term list matches io_deq_uop_bypassable below plus the
// branch terms (ALU-class ops).
|{&_cs_decoder_decoded_andMatrixOutputs_T_7, &_cs_decoder_decoded_andMatrixOutputs_T_8, &_cs_decoder_decoded_andMatrixOutputs_T_14, &_cs_decoder_decoded_andMatrixOutputs_T_17, &_cs_decoder_decoded_andMatrixOutputs_T_21, &_cs_decoder_decoded_andMatrixOutputs_T_40, &_cs_decoder_decoded_andMatrixOutputs_T_48, &_cs_decoder_decoded_andMatrixOutputs_T_50, &_cs_decoder_decoded_andMatrixOutputs_T_66, &_cs_decoder_decoded_andMatrixOutputs_T_81, &_cs_decoder_decoded_andMatrixOutputs_T_83, &_cs_decoder_decoded_andMatrixOutputs_T_84, &_cs_decoder_decoded_andMatrixOutputs_T_86}};
// Conditional-branch flag: OR of two branch-shaped product terms (the same
// terms also feed cs_imm_sel's middle bit above).
assign io_deq_uop_is_br = |{&_cs_decoder_decoded_andMatrixOutputs_T_40, &_cs_decoder_decoded_andMatrixOutputs_T_81};
// uopc 0x26 / 0x25: presumably uopJALR / uopJAL in BOOM's uopc enumeration
// -- confirm against the Chisel micro-op constants.
assign io_deq_uop_is_jalr = cs_uopc == 7'h26;
assign io_deq_uop_is_jal = cs_uopc == 7'h25;
// Front-end metadata passed through unchanged.
assign io_deq_uop_is_sfb = io_enq_uop_is_sfb;
assign io_deq_uop_ftq_idx = io_enq_uop_ftq_idx;
assign io_deq_uop_edge_inst = io_enq_uop_edge_inst;
assign io_deq_uop_pc_lob = io_enq_uop_pc_lob;
assign io_deq_uop_taken = io_enq_uop_taken;
// Repacked 20-bit immediate: {inst[31:25], lo5, inst[19:12]}. The middle
// 5 bits come from the rd position (inst[11:7]) when imm_sel is 1 or 2
// (formats that place low immediate bits there, S/B-style -- confirm the
// selector encoding) and from inst[24:20] otherwise.
assign io_deq_uop_imm_packed = {io_enq_uop_inst[31:25], cs_imm_sel == 3'h2 | cs_imm_sel == 3'h1 ? io_enq_uop_inst[11:7] : io_enq_uop_inst[24:20], io_enq_uop_inst[19:12]};
// Decode-stage exception flag, OR of:
//  - a taken interrupt on this uop (_GEN);
//  - breakpoint/debug-trap, instruction page-fault and access-fault flags
//    carried in from the front end;
//  - illegal/unrecognized instruction: the 41-term product vector below
//    reduces to zero when no legal-encoding term fires (`== 41'h0`);
//  - an FP instruction while FP is unusable (io_csr_decode_fp_illegal);
//  - an illegal CSR access: read-illegal, or write-illegal unless the access
//    is a pure read (csr_ren);
//  - a system instruction (uopc 0x6B, or csr_cmd 4) rejected by the CSR unit.
// Priority among causes is resolved by io_deq_uop_exc_cause below.
assign io_deq_uop_exception =
_GEN | io_enq_uop_bp_debug_if | io_enq_uop_bp_xcpt_if | io_enq_uop_xcpt_pf_if | io_enq_uop_xcpt_ae_if
| {&_cs_decoder_decoded_andMatrixOutputs_T,
&_cs_decoder_decoded_andMatrixOutputs_T_2,
&_cs_decoder_decoded_andMatrixOutputs_T_3,
&_cs_decoder_decoded_andMatrixOutputs_T_5,
&_cs_decoder_decoded_andMatrixOutputs_T_8,
&_cs_decoder_decoded_andMatrixOutputs_T_10,
&_cs_decoder_decoded_andMatrixOutputs_T_14,
&_cs_decoder_decoded_andMatrixOutputs_T_15,
&_cs_decoder_decoded_andMatrixOutputs_T_16,
&_cs_decoder_decoded_andMatrixOutputs_T_25,
&_cs_decoder_decoded_andMatrixOutputs_T_33,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], io_enq_uop_inst[5], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12]},
&_cs_decoder_decoded_andMatrixOutputs_T_43,
&_cs_decoder_decoded_andMatrixOutputs_T_45,
&_cs_decoder_decoded_andMatrixOutputs_T_47,
&_cs_decoder_decoded_andMatrixOutputs_T_50,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[5], io_enq_uop_inst[6], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11]},
&_cs_decoder_decoded_andMatrixOutputs_T_61,
&_cs_decoder_decoded_andMatrixOutputs_T_66,
&_cs_decoder_decoded_andMatrixOutputs_T_70,
&_cs_decoder_decoded_andMatrixOutputs_T_73,
&_cs_decoder_decoded_andMatrixOutputs_T_81,
&_cs_decoder_decoded_andMatrixOutputs_T_83,
&_cs_decoder_decoded_andMatrixOutputs_T_84,
&_cs_decoder_decoded_andMatrixOutputs_T_86,
&_cs_decoder_decoded_andMatrixOutputs_T_106,
&_cs_decoder_decoded_andMatrixOutputs_T_115,
&_cs_decoder_decoded_andMatrixOutputs_T_121,
&_cs_decoder_decoded_andMatrixOutputs_T_123,
&_cs_decoder_decoded_andMatrixOutputs_T_125,
&_cs_decoder_decoded_andMatrixOutputs_T_130,
&_cs_decoder_decoded_andMatrixOutputs_T_133,
&_cs_decoder_decoded_andMatrixOutputs_T_134,
&_cs_decoder_decoded_andMatrixOutputs_T_137,
&_cs_decoder_decoded_andMatrixOutputs_T_149,
&_cs_decoder_decoded_andMatrixOutputs_T_151,
&_cs_decoder_decoded_andMatrixOutputs_T_153,
&_cs_decoder_decoded_andMatrixOutputs_T_157,
&_cs_decoder_decoded_andMatrixOutputs_T_162,
&_cs_decoder_decoded_andMatrixOutputs_T_167,
&_cs_decoder_decoded_andMatrixOutputs_T_169} == 41'h0 | (|_cs_decoder_decoded_orMatrixOutputs_T_92) & io_csr_decode_fp_illegal | csr_en & (io_csr_decode_read_illegal | ~csr_ren & io_csr_decode_write_illegal) | (cs_uopc == 7'h6B | cs_csr_cmd == 3'h4) & io_csr_decode_system_illegal;
// Exception cause: the external interrupt cause when _GEN, else a
// priority-encoded code: 0xE (presumably the debug-breakpoint cause -- TODO
// confirm), 3, 0xC, 1, falling through to 2. The values 3/12/1/2 match the
// standard RISC-V mcause codes for breakpoint, instruction page fault,
// instruction access fault and illegal instruction respectively.
assign io_deq_uop_exc_cause = _GEN ? io_interrupt_cause : {60'h0, io_enq_uop_bp_debug_if ? 4'hE : io_enq_uop_bp_xcpt_if ? 4'h3 : io_enq_uop_xcpt_pf_if ? 4'hC : {2'h0, io_enq_uop_xcpt_ae_if ? 2'h1 : 2'h2}};
// Result may be bypassed to dependent uops (ALU-class product terms; a
// subset of the terms feeding io_deq_uop_fu_code's lowest bit above).
assign io_deq_uop_bypassable = |{&_cs_decoder_decoded_andMatrixOutputs_T_7, &_cs_decoder_decoded_andMatrixOutputs_T_8, &_cs_decoder_decoded_andMatrixOutputs_T_14, &_cs_decoder_decoded_andMatrixOutputs_T_17, &_cs_decoder_decoded_andMatrixOutputs_T_21, &_cs_decoder_decoded_andMatrixOutputs_T_48, &_cs_decoder_decoded_andMatrixOutputs_T_50, &_cs_decoder_decoded_andMatrixOutputs_T_66, &_cs_decoder_decoded_andMatrixOutputs_T_83, &_cs_decoder_decoded_andMatrixOutputs_T_84, &_cs_decoder_decoded_andMatrixOutputs_T_86};
assign io_deq_uop_mem_cmd = cs_mem_cmd;
// For mem_cmd 0x14 or 0x5 mem_size instead packs {rs2 != x0, rs1 != x0}
// (presumably the SFENCE-style "address/asid valid" trick -- TODO confirm);
// otherwise it is the funct3 size bits inst[13:12].
assign io_deq_uop_mem_size = cs_mem_cmd == 5'h14 | cs_mem_cmd == 5'h5 ? {|(io_enq_uop_inst[24:20]), |_uop_lrs1_T} : io_enq_uop_inst[13:12];
// funct3[2] (inst[14]) set marks an unsigned access, hence the inversion.
assign io_deq_uop_mem_signed = ~(io_enq_uop_inst[14]);
assign io_deq_uop_is_fence = &_cs_decoder_decoded_andMatrixOutputs_T_6;
// FENCE.I-shaped match: MISC-MEM opcode bits with funct3 == 3'b001
// (inst[12] set, inst[13]/inst[14] clear via the inverted inputs).
assign io_deq_uop_is_fencei = &{io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], io_enq_uop_inst[3], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12]};
assign io_deq_uop_is_amo = |{&_cs_decoder_decoded_andMatrixOutputs_T_70, &_cs_decoder_decoded_andMatrixOutputs_T_115};
// Uop allocates a load-queue / store-queue entry (note AMO terms T_70/T_115
// appear in uses_stq, consistent with is_amo above).
assign io_deq_uop_uses_ldq = |{&_cs_decoder_decoded_andMatrixOutputs_T_1, &_cs_decoder_decoded_andMatrixOutputs_T_2, &_cs_decoder_decoded_andMatrixOutputs_T_62, &_cs_decoder_decoded_andMatrixOutputs_T_118};
assign io_deq_uop_uses_stq = |{&_cs_decoder_decoded_andMatrixOutputs_T_6, &_cs_decoder_decoded_andMatrixOutputs_T_13, &_cs_decoder_decoded_andMatrixOutputs_T_68, &_cs_decoder_decoded_andMatrixOutputs_T_70, &_cs_decoder_decoded_andMatrixOutputs_T_115};
// Single product term; presumably ECALL/EBREAK-style ops that record the PC
// into the EPC CSR -- TODO confirm against the decode table.
assign io_deq_uop_is_sys_pc2epc = &_cs_decoder_decoded_andMatrixOutputs_T_45;
// "Unique" uops must execute alone in the pipeline (serializing ops).
assign io_deq_uop_is_unique = |{&_cs_decoder_decoded_andMatrixOutputs_T_5, &_cs_decoder_decoded_andMatrixOutputs_T_45, &_cs_decoder_decoded_andMatrixOutputs_T_58, &_cs_decoder_decoded_andMatrixOutputs_T_70, &_cs_decoder_decoded_andMatrixOutputs_T_73, &_cs_decoder_decoded_andMatrixOutputs_T_115, &_cs_decoder_decoded_andMatrixOutputs_T_121, &_cs_decoder_decoded_andMatrixOutputs_T_123, &_cs_decoder_decoded_andMatrixOutputs_T_125, &_cs_decoder_decoded_andMatrixOutputs_T_130, &_cs_decoder_decoded_andMatrixOutputs_T_157};
// Pipeline flush after commit: decoded terms, plus any CSR write the CSR
// unit flags as flush-requiring (io_csr_decode_write_flush), unless the
// access is a pure read.
assign io_deq_uop_flush_on_commit = (|{&_cs_decoder_decoded_andMatrixOutputs_T_5, &_cs_decoder_decoded_andMatrixOutputs_T_44, &_cs_decoder_decoded_andMatrixOutputs_T_58, &_cs_decoder_decoded_andMatrixOutputs_T_70, &_cs_decoder_decoded_andMatrixOutputs_T_73, &_cs_decoder_decoded_andMatrixOutputs_T_115, &_cs_decoder_decoded_andMatrixOutputs_T_118, &_cs_decoder_decoded_andMatrixOutputs_T_122, &_cs_decoder_decoded_andMatrixOutputs_T_124, &_cs_decoder_decoded_andMatrixOutputs_T_127, &_cs_decoder_decoded_andMatrixOutputs_T_156}) | csr_en & ~csr_ren & io_csr_decode_write_flush;
// Architectural register specifiers, zero-extended to the 6-bit uop fields:
// rd, rs1, rs2 and rs3 (inst[31:27], FP fused-multiply-add style).
assign io_deq_uop_ldst = {1'h0, io_enq_uop_inst[11:7]};
assign io_deq_uop_lrs1 = {1'h0, _uop_lrs1_T};
assign io_deq_uop_lrs2 = {1'h0, io_enq_uop_inst[24:20]};
assign io_deq_uop_lrs3 = {1'h0, io_enq_uop_inst[31:27]};
// Destination is valid unless dst_type == 2 (no destination) or the write
// targets x0 with dst_type == 0 (integer register file).
assign io_deq_uop_ldst_val = cs_dst_type != 2'h2 & ~(io_enq_uop_inst[11:7] == 5'h0 & cs_dst_type == 2'h0);
assign io_deq_uop_dst_rtype = cs_dst_type;
// rs1 register type (2 bits), same RT_* encoding as cs_dst_type above.
assign io_deq_uop_lrs1_rtype = {|{&_cs_decoder_decoded_andMatrixOutputs_T_5, &_cs_decoder_decoded_andMatrixOutputs_T_10, &_cs_decoder_decoded_andMatrixOutputs_T_43, &_cs_decoder_decoded_andMatrixOutputs_T_44, &_cs_decoder_decoded_andMatrixOutputs_T_92, &_cs_decoder_decoded_andMatrixOutputs_T_95, &_cs_decoder_decoded_andMatrixOutputs_T_122, &_cs_decoder_decoded_andMatrixOutputs_T_124, &_cs_decoder_decoded_andMatrixOutputs_T_156}, |{&_cs_decoder_decoded_andMatrixOutputs_T_24, &_cs_decoder_decoded_andMatrixOutputs_T_33, &_cs_decoder_decoded_andMatrixOutputs_T_92, &_cs_decoder_decoded_andMatrixOutputs_T_95, &_cs_decoder_decoded_andMatrixOutputs_T_133, &_cs_decoder_decoded_andMatrixOutputs_T_134, &_cs_decoder_decoded_andMatrixOutputs_T_137, &_cs_decoder_decoded_andMatrixOutputs_T_149, &_cs_decoder_decoded_andMatrixOutputs_T_151, &_cs_decoder_decoded_andMatrixOutputs_T_153, &_cs_decoder_decoded_andMatrixOutputs_T_164, &_cs_decoder_decoded_andMatrixOutputs_T_169}};
assign io_deq_uop_lrs2_rtype =
{|{&_cs_decoder_decoded_andMatrixOutputs_T, &_cs_decoder_decoded_andMatrixOutputs_T_2, &_cs_decoder_decoded_andMatrixOutputs_T_5, &_cs_decoder_decoded_andMatrixOutputs_T_8, &_cs_decoder_decoded_andMatrixOutputs_T_10, &_cs_decoder_decoded_andMatrixOutputs_T_42, &_cs_decoder_decoded_andMatrixOutputs_T_43, &_cs_decoder_decoded_andMatrixOutputs_T_44, &_cs_decoder_decoded_andMatrixOutputs_T_48, &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[3], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]}, &_cs_decoder_decoded_andMatrixOutputs_T_58, &_cs_decoder_decoded_andMatrixOutputs_T_62, &_cs_decoder_decoded_andMatrixOutputs_T_66, &_cs_decoder_decoded_andMatrixOutputs_T_73, &_cs_decoder_decoded_andMatrixOutputs_T_83, &{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], io_enq_uop_inst[3], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], cs_decoder_decoded_invInputs[4], io_enq_uop_inst[12], cs_decoder_decoded_invInputs[11], io_enq_uop_inst[14], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[29]}, &_cs_decoder_decoded_andMatrixOutputs_T_118, &_cs_decoder_decoded_andMatrixOutputs_T_122, &_cs_decoder_decoded_andMatrixOutputs_T_124, &_cs_decoder_decoded_andMatrixOutputs_T_149, &_cs_decoder_decoded_andMatrixOutputs_T_151, &_cs_decoder_decoded_andMatrixOutputs_T_153, &_cs_decoder_decoded_andMatrixOutputs_T_156, &_cs_decoder_decoded_andMatrixOutputs_T_161, &_cs_decoder_decoded_andMatrixOutputs_T_162},
|{&_cs_decoder_decoded_andMatrixOutputs_T_24, &_cs_decoder_decoded_andMatrixOutputs_T_33, &_cs_decoder_decoded_andMatrixOutputs_T_69, &_cs_decoder_decoded_andMatrixOutputs_T_133, &_cs_decoder_decoded_andMatrixOutputs_T_134, &_cs_decoder_decoded_andMatrixOutputs_T_137}};
assign io_deq_uop_frs3_en = &_cs_decoder_decoded_andMatrixOutputs_T_24;
assign io_deq_uop_fp_val = |_cs_decoder_decoded_orMatrixOutputs_T_92;
assign io_deq_uop_fp_single =
|{&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[27], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], io_enq_uop_inst[2], cs_decoder_decoded_invInputs[1], cs_decoder_decoded_invInputs[2], cs_decoder_decoded_invInputs[4], cs_decoder_decoded_invInputs[10], io_enq_uop_inst[13], cs_decoder_decoded_invInputs[12]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], cs_decoder_decoded_invInputs[28]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], cs_decoder_decoded_invInputs[28]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], cs_decoder_decoded_invInputs[28], cs_decoder_decoded_invInputs[29]},
&_cs_decoder_decoded_andMatrixOutputs_T_149,
&_cs_decoder_decoded_andMatrixOutputs_T_154,
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[27], io_enq_uop_inst[30], io_enq_uop_inst[31]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[10], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[18], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], io_enq_uop_inst[29], io_enq_uop_inst[30], io_enq_uop_inst[31]},
&{io_enq_uop_inst[0], io_enq_uop_inst[1], cs_decoder_decoded_invInputs[0], cs_decoder_decoded_invInputs[1], io_enq_uop_inst[4], cs_decoder_decoded_invInputs[3], io_enq_uop_inst[6], cs_decoder_decoded_invInputs[11], cs_decoder_decoded_invInputs[12], cs_decoder_decoded_invInputs[18], cs_decoder_decoded_invInputs[19], cs_decoder_decoded_invInputs[20], cs_decoder_decoded_invInputs[21], cs_decoder_decoded_invInputs[22], cs_decoder_decoded_invInputs[23], cs_decoder_decoded_invInputs[24], cs_decoder_decoded_invInputs[25], cs_decoder_decoded_invInputs[26], io_enq_uop_inst[29], io_enq_uop_inst[30], io_enq_uop_inst[31]}};
assign io_deq_uop_xcpt_pf_if = io_enq_uop_xcpt_pf_if;
assign io_deq_uop_xcpt_ae_if = io_enq_uop_xcpt_ae_if;
assign io_deq_uop_bp_debug_if = io_enq_uop_bp_debug_if;
assign io_deq_uop_bp_xcpt_if = io_enq_uop_bp_xcpt_if;
assign io_deq_uop_debug_fsrc = io_enq_uop_debug_fsrc;
assign io_csr_decode_inst = io_enq_uop_inst;
endmodule |
Generate the Verilog code corresponding to this Chisel code package boom.v3.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import boom.v3.common._
import boom.v3.util.{BoomCoreStringPrefix, MaskLower, WrapInc}
import scala.math.min
// Response from one TAGE component table for a single fetch-bank slot.
// NOTE: field declaration order fixes this Bundle's bit layout — do not reorder.
class TageResp extends Bundle {
  val ctr = UInt(3.W) // 3-bit saturating counter; MSB is the taken direction (weak values 3/4 defer to altpred in the bank)
  val u   = UInt(2.W) // 2-bit "useful" counter ({hi,lo} bit planes), guides entry allocation
}
// One TAGE component table with its own (folded) history length.
//
// Lookup pipeline: f1 presents pc/ghist and the hashed row index; the
// SyncReadMems return data in stage 2, where the partial tag is compared;
// f3_resp registers the per-slot hit/ctr/u outputs one cycle later.
//
// @param nRows      number of rows in the table SRAMs
// @param tagSz      partial-tag width stored per entry
// @param histLength bits of global history folded into the index/tag hashes
// @param uBitPeriod period (in cycles) of the background u-bit clearing sweep
class TageTable(val nRows: Int, val tagSz: Int, val histLength: Int, val uBitPeriod: Int)
  (implicit p: Parameters) extends BoomModule()(p)
  with HasBoomFrontendParameters
{
  require(histLength <= globalHistoryLength)
  val nWrBypassEntries = 2 // depth of the small write-bypass CAM below
  val io = IO( new Bundle {
    // f1 lookup request
    val f1_req_valid = Input(Bool())
    val f1_req_pc    = Input(UInt(vaddrBitsExtended.W))
    val f1_req_ghist = Input(UInt(globalHistoryLength.W))
    // f3 response: valid indicates a tag hit in this table
    val f3_resp = Output(Vec(bankWidth, Valid(new TageResp)))
    // Commit-time training interface, one slot per bank lane
    val update_mask    = Input(Vec(bankWidth, Bool()))
    val update_taken   = Input(Vec(bankWidth, Bool()))
    val update_alloc   = Input(Vec(bankWidth, Bool()))
    val update_old_ctr = Input(Vec(bankWidth, UInt(3.W)))
    val update_pc   = Input(UInt())
    val update_hist = Input(UInt())
    // Separate mask/value for the "useful" bit planes
    val update_u_mask = Input(Vec(bankWidth, Bool()))
    val update_u      = Input(Vec(bankWidth, UInt(2.W)))
  })
  // XOR-fold the low histLength bits of `hist` down to l bits.
  def compute_folded_hist(hist: UInt, l: Int) = {
    val nChunks = (histLength + l - 1) / l
    val hist_chunks = (0 until nChunks) map {i =>
      hist(min((i+1)*l, histLength)-1, i*l)
    }
    hist_chunks.reduce(_^_)
  }
  // Row index = fetch index XOR folded history; the partial tag comes from
  // the upper fetch-index bits XOR a differently-folded history.
  def compute_tag_and_hash(unhashed_idx: UInt, hist: UInt) = {
    val idx_history = compute_folded_hist(hist, log2Ceil(nRows))
    val idx = (unhashed_idx ^ idx_history)(log2Ceil(nRows)-1,0)
    val tag_history = compute_folded_hist(hist, tagSz)
    val tag = ((unhashed_idx >> log2Ceil(nRows)) ^ tag_history)(tagSz-1,0)
    (idx, tag)
  }
  // Saturating 3-bit counter update (clamps at 0 and 7).
  def inc_ctr(ctr: UInt, taken: Bool): UInt = {
    Mux(!taken, Mux(ctr === 0.U, 0.U, ctr - 1.U),
                Mux(ctr === 7.U, 7.U, ctr + 1.U))
  }
  // After reset, walk reset_idx through every row once, zeroing the table.
  val doing_reset = RegInit(true.B)
  val reset_idx = RegInit(0.U(log2Ceil(nRows).W))
  reset_idx := reset_idx + doing_reset
  when (reset_idx === (nRows-1).U) { doing_reset := false.B }
  class TageEntry extends Bundle {
    val valid = Bool() // TODO: Remove this valid bit
    val tag = UInt(tagSz.W)
    val ctr = UInt(3.W)
  }
  val tageEntrySz = 1 + tagSz + 3
  val (s1_hashed_idx, s1_tag) = compute_tag_and_hash(fetchIdx(io.f1_req_pc), io.f1_req_ghist)
  // Storage: hi/lo "useful" bit planes plus the main tagged-entry array.
  val hi_us = SyncReadMem(nRows, Vec(bankWidth, Bool()))
  val lo_us = SyncReadMem(nRows, Vec(bankWidth, Bool()))
  val table = SyncReadMem(nRows, Vec(bankWidth, UInt(tageEntrySz.W)))
  val mems = Seq((f"tage_l$histLength", nRows, bankWidth * tageEntrySz))
  // Stage-2 read data (SyncReadMem reads take one cycle) and tag compare.
  val s2_tag = RegNext(s1_tag)
  val s2_req_rtage = VecInit(table.read(s1_hashed_idx, io.f1_req_valid).map(_.asTypeOf(new TageEntry)))
  val s2_req_rhius = hi_us.read(s1_hashed_idx, io.f1_req_valid)
  val s2_req_rlous = lo_us.read(s1_hashed_idx, io.f1_req_valid)
  val s2_req_rhits = VecInit(s2_req_rtage.map(e => e.valid && e.tag === s2_tag && !doing_reset))
  for (w <- 0 until bankWidth) {
    // This bit indicates the TAGE table matched here
    io.f3_resp(w).valid    := RegNext(s2_req_rhits(w))
    io.f3_resp(w).bits.u   := RegNext(Cat(s2_req_rhius(w), s2_req_rlous(w)))
    io.f3_resp(w).bits.ctr := RegNext(s2_req_rtage(w).ctr)
  }
  // Free-running counter driving the periodic u-bit clearing sweep; its top
  // bit selects whether the hi or the lo plane is cleared on this pass.
  val clear_u_ctr = RegInit(0.U((log2Ceil(uBitPeriod) + log2Ceil(nRows) + 1).W))
  when (doing_reset) { clear_u_ctr := 1.U } .otherwise { clear_u_ctr := clear_u_ctr + 1.U }
  val doing_clear_u = clear_u_ctr(log2Ceil(uBitPeriod)-1,0) === 0.U
  val doing_clear_u_hi = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 1.U
  val doing_clear_u_lo = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 0.U
  val clear_u_idx = clear_u_ctr >> log2Ceil(uBitPeriod)
  val (update_idx, update_tag) = compute_tag_and_hash(fetchIdx(io.update_pc), io.update_hist)
  // Main-table write port: post-reset zeroing (all lanes) takes priority
  // over normal masked training writes.
  val update_wdata = Wire(Vec(bankWidth, new TageEntry))
  table.write(
    Mux(doing_reset, reset_idx                                          , update_idx),
    Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 0.U(tageEntrySz.W) }), VecInit(update_wdata.map(_.asUInt))),
    Mux(doing_reset, ~(0.U(bankWidth.W))                                , io.update_mask.asUInt).asBools
  )
  // u-bit hi plane: reset/clear-sweep writes zeros across all lanes,
  // otherwise a masked training write.
  val update_hi_wdata = Wire(Vec(bankWidth, Bool()))
  hi_us.write(
    Mux(doing_reset, reset_idx, Mux(doing_clear_u_hi, clear_u_idx, update_idx)),
    Mux(doing_reset || doing_clear_u_hi, VecInit((0.U(bankWidth.W)).asBools), update_hi_wdata),
    Mux(doing_reset || doing_clear_u_hi, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools
  )
  // u-bit lo plane: same structure as the hi plane, on the other sweep half.
  val update_lo_wdata = Wire(Vec(bankWidth, Bool()))
  lo_us.write(
    Mux(doing_reset, reset_idx, Mux(doing_clear_u_lo, clear_u_idx, update_idx)),
    Mux(doing_reset || doing_clear_u_lo, VecInit((0.U(bankWidth.W)).asBools), update_lo_wdata),
    Mux(doing_reset || doing_clear_u_lo, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools
  )
  // Write-bypass CAM: forwards recently-written counters for the same
  // (idx, tag) so back-to-back updates train on fresh values.
  val wrbypass_tags = Reg(Vec(nWrBypassEntries, UInt(tagSz.W)))
  val wrbypass_idxs = Reg(Vec(nWrBypassEntries, UInt(log2Ceil(nRows).W)))
  val wrbypass     = Reg(Vec(nWrBypassEntries, Vec(bankWidth, UInt(3.W))))
  val wrbypass_enq_idx = RegInit(0.U(log2Ceil(nWrBypassEntries).W))
  val wrbypass_hits = VecInit((0 until nWrBypassEntries) map { i =>
    !doing_reset &&
    wrbypass_tags(i) === update_tag &&
    wrbypass_idxs(i) === update_idx
  })
  val wrbypass_hit = wrbypass_hits.reduce(_||_)
  val wrbypass_hit_idx = PriorityEncoder(wrbypass_hits)
  for (w <- 0 until bankWidth) {
    // New allocations start weakly biased: 4 = weak taken, 3 = weak not-taken.
    // Otherwise bump the bypassed (or provided) old counter toward the outcome.
    update_wdata(w).ctr := Mux(io.update_alloc(w),
      Mux(io.update_taken(w), 4.U,
        3.U
      ),
      Mux(wrbypass_hit, inc_ctr(wrbypass(wrbypass_hit_idx)(w), io.update_taken(w)),
        inc_ctr(io.update_old_ctr(w), io.update_taken(w))
      )
    )
    update_wdata(w).valid := true.B
    update_wdata(w).tag   := update_tag
    update_hi_wdata(w) := io.update_u(w)(1)
    update_lo_wdata(w) := io.update_u(w)(0)
  }
  // Maintain the bypass CAM: refresh a hit entry in place, otherwise enqueue
  // round-robin.
  when (io.update_mask.reduce(_||_)) {
    when (wrbypass_hits.reduce(_||_)) {
      wrbypass(wrbypass_hit_idx) := VecInit(update_wdata.map(_.ctr))
    } .otherwise {
      wrbypass     (wrbypass_enq_idx) := VecInit(update_wdata.map(_.ctr))
      wrbypass_tags(wrbypass_enq_idx) := update_tag
      wrbypass_idxs(wrbypass_enq_idx) := update_idx
      wrbypass_enq_idx := WrapInc(wrbypass_enq_idx, nWrBypassEntries)
    }
  }
}
// Configuration for the TAGE predictor: one (nRows, histLength, tagSz)
// tuple per component table (history lengths form a geometric-ish series),
// plus the u-bit clearing period shared by all tables.
case class BoomTageParams(
  //                                          nSets, histLen, tagSz
  tableInfo: Seq[Tuple3[Int, Int, Int]] = Seq(( 128, 2, 7),
                                              ( 128, 4, 7),
                                              ( 256, 8, 8),
                                              ( 256, 16, 8),
                                              ( 128, 32, 9),
                                              ( 128, 64, 9)),
  uBitPeriod: Int = 2048 // cycles between u-bit clearing sweeps
)
// TAGE predictor bank: combines several TageTables of increasing history
// length.  The longest-history hitting table (the "provider") supplies the
// prediction; the next-longest hit (or the incoming base prediction) is the
// alternate prediction ("altpred").
class TageBranchPredictorBank(params: BoomTageParams = BoomTageParams())(implicit p: Parameters) extends BranchPredictorBank()(p)
{
  val tageUBitPeriod = params.uBitPeriod
  val tageNTables = params.tableInfo.size
  // Per-prediction metadata carried through to update time.
  class TageMeta extends Bundle
  {
    val provider     = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W))) // table that provided the prediction
    val alt_differs  = Vec(bankWidth, Output(Bool()))                       // altpred disagreed with final prediction
    val provider_u   = Vec(bankWidth, Output(UInt(2.W)))                    // provider's u counter at predict time
    val provider_ctr = Vec(bankWidth, Output(UInt(3.W)))                    // provider's ctr at predict time
    val allocate     = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W))) // table chosen for allocation on mispredict
  }
  val f3_meta = Wire(new TageMeta)
  override val metaSz = f3_meta.asUInt.getWidth
  require(metaSz <= bpdMaxMetaLength)
  // Saturating 2-bit "useful" update: only moves when altpred differed from
  // the final prediction; a mispredict decrements, a correct one increments.
  def inc_u(u: UInt, alt_differs: Bool, mispredict: Bool): UInt = {
    Mux(!alt_differs, u,
    Mux(mispredict, Mux(u === 0.U, 0.U, u - 1.U),
                    Mux(u === 3.U, 3.U, u + 1.U)))
  }
  // Instantiate one TageTable per configured (nRows, histLen, tagSz) entry
  // and hook up the (1-cycle-delayed) lookup request.
  val tt = params.tableInfo map {
    case (n, l, s) => {
      val t = Module(new TageTable(n, s, l, params.uBitPeriod))
      t.io.f1_req_valid := RegNext(io.f0_valid)
      t.io.f1_req_pc    := RegNext(io.f0_pc)
      t.io.f1_req_ghist := io.f1_ghist
      (t, t.mems)
    }
  }
  val tables = tt.map(_._1)
  val mems = tt.map(_._2).flatten
  val f3_resps = VecInit(tables.map(_.io.f3_resp))
  // Recover the predict-time metadata from the opaque update payload.
  val s1_update_meta = s1_update.bits.meta.asTypeOf(new TageMeta)
  val s1_update_mispredict_mask = UIntToOH(s1_update.bits.cfi_idx.bits) &
    Fill(bankWidth, s1_update.bits.cfi_mispredicted)
  // Per-table, per-lane update staging; masks default to zero and the data
  // wires to DontCare, then are overwritten selectively below.
  val s1_update_mask    = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, Bool()))))
  val s1_update_u_mask  = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, UInt(1.W)))))
  val s1_update_taken   = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
  val s1_update_old_ctr = Wire(Vec(tageNTables, Vec(bankWidth, UInt(3.W))))
  val s1_update_alloc   = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
  val s1_update_u       = Wire(Vec(tageNTables, Vec(bankWidth, UInt(2.W))))
  s1_update_taken   := DontCare
  s1_update_old_ctr := DontCare
  s1_update_alloc   := DontCare
  s1_update_u       := DontCare
  for (w <- 0 until bankWidth) {
    // Scan tables from shortest to longest history.  The Scala vars build a
    // priority chain at elaboration time: the last (longest-history) hit wins.
    var altpred = io.resp_in(0).f3(w).taken
    val final_altpred = WireInit(io.resp_in(0).f3(w).taken)
    var provided = false.B
    var provider = 0.U
    io.resp.f3(w).taken := io.resp_in(0).f3(w).taken
    for (i <- 0 until tageNTables) {
      val hit = f3_resps(i)(w).valid
      val ctr = f3_resps(i)(w).bits.ctr
      when (hit) {
        // Weak counters (3/4) defer to the alternate prediction.
        io.resp.f3(w).taken := Mux(ctr === 3.U || ctr === 4.U, altpred, ctr(2))
        final_altpred := altpred
      }
      provided = provided || hit
      provider = Mux(hit, i.U, provider)
      altpred  = Mux(hit, f3_resps(i)(w).bits.ctr(2), altpred)
    }
    f3_meta.provider(w).valid := provided
    f3_meta.provider(w).bits  := provider
    f3_meta.alt_differs(w)    := final_altpred =/= io.resp.f3(w).taken
    f3_meta.provider_u(w)     := f3_resps(provider)(w).bits.u
    f3_meta.provider_ctr(w)   := f3_resps(provider)(w).bits.ctr
    // Create a mask of tables which did not hit our query, and also contain useless entries
    // and also uses a longer history than the provider
    val allocatable_slots = (
      VecInit(f3_resps.map(r => !r(w).valid && r(w).bits.u === 0.U)).asUInt &
      ~(MaskLower(UIntToOH(provider)) & Fill(tageNTables, provided))
    )
    // Randomize the allocation choice with an LFSR; fall back to the first
    // allocatable slot when the randomly-masked pick is not allocatable.
    val alloc_lfsr = random.LFSR(tageNTables max 2)
    val first_entry  = PriorityEncoder(allocatable_slots)
    val masked_entry = PriorityEncoder(allocatable_slots & alloc_lfsr)
    val alloc_entry  = Mux(allocatable_slots(masked_entry),
      masked_entry,
      first_entry)
    f3_meta.allocate(w).valid := allocatable_slots =/= 0.U
    f3_meta.allocate(w).bits  := alloc_entry
    val update_was_taken = (s1_update.bits.cfi_idx.valid &&
                           (s1_update.bits.cfi_idx.bits === w.U) &&
                            s1_update.bits.cfi_taken)
    // On a committed branch with a provider, train that table's counter and
    // update its u bit.
    when (s1_update.bits.br_mask(w) && s1_update.valid && s1_update.bits.is_commit_update) {
      when (s1_update_meta.provider(w).valid) {
        val provider = s1_update_meta.provider(w).bits
        s1_update_mask(provider)(w)   := true.B
        s1_update_u_mask(provider)(w) := true.B
        val new_u = inc_u(s1_update_meta.provider_u(w),
                          s1_update_meta.alt_differs(w),
                          s1_update_mispredict_mask(w))
        s1_update_u      (provider)(w) := new_u
        s1_update_taken  (provider)(w) := update_was_taken
        s1_update_old_ctr(provider)(w) := s1_update_meta.provider_ctr(w)
        s1_update_alloc  (provider)(w) := false.B
      }
    }
  }
  // On a committed mispredict: allocate into the table reserved at predict
  // time if one exists, otherwise clear the u bits of all tables with longer
  // history than the provider so a future allocation can succeed.
  when (s1_update.valid && s1_update.bits.is_commit_update && s1_update.bits.cfi_mispredicted && s1_update.bits.cfi_idx.valid) {
    val idx = s1_update.bits.cfi_idx.bits
    val allocate = s1_update_meta.allocate(idx)
    when (allocate.valid) {
      s1_update_mask (allocate.bits)(idx) := true.B
      s1_update_taken(allocate.bits)(idx) := s1_update.bits.cfi_taken
      s1_update_alloc(allocate.bits)(idx) := true.B
      s1_update_u_mask(allocate.bits)(idx) := true.B
      s1_update_u     (allocate.bits)(idx) := 0.U
    } .otherwise {
      val provider = s1_update_meta.provider(idx)
      val decr_mask = Mux(provider.valid, ~MaskLower(UIntToOH(provider.bits)), 0.U)
      for (i <- 0 until tageNTables) {
        when (decr_mask(i)) {
          s1_update_u_mask(i)(idx) := true.B
          s1_update_u     (i)(idx) := 0.U
        }
      }
    }
  }
  // Pipeline the staged update signals one cycle into each table.
  for (i <- 0 until tageNTables) {
    for (w <- 0 until bankWidth) {
      tables(i).io.update_mask(w)    := RegNext(s1_update_mask(i)(w))
      tables(i).io.update_taken(w)   := RegNext(s1_update_taken(i)(w))
      tables(i).io.update_alloc(w)   := RegNext(s1_update_alloc(i)(w))
      tables(i).io.update_old_ctr(w) := RegNext(s1_update_old_ctr(i)(w))
      tables(i).io.update_u_mask(w)  := RegNext(s1_update_u_mask(i)(w))
      tables(i).io.update_u(w)       := RegNext(s1_update_u(i)(w))
    }
    tables(i).io.update_pc   := RegNext(s1_update.bits.pc)
    tables(i).io.update_hist := RegNext(s1_update.bits.ghist)
  }
  //io.f3_meta := Cat(f3_meta.asUInt, micro.io.f3_meta(micro.metaSz-1,0), base.io.f3_meta(base.metaSz-1, 0))
  io.f3_meta := f3_meta.asUInt
} | module hi_us_2(
  // Usefulness-bit ("hi" plane) array for one TAGE table bank: 256 rows of
  // 4 one-bit lanes, with one synchronous read port and one per-lane-masked
  // write port.  Thin wrapper around the shared extracted SRAM macro
  // hi_us_0_ext.
  input  [7:0] R0_addr,
  input        R0_en,
  input        R0_clk,
  output [3:0] R0_data,
  input  [7:0] W0_addr,
  input        W0_clk,
  input  [3:0] W0_data,
  input  [3:0] W0_mask
);
  hi_us_0_ext hi_us_0_ext (
    .R0_addr (R0_addr),
    .R0_en   (R0_en),
    .R0_clk  (R0_clk),
    .R0_data (R0_data),
    .W0_addr (W0_addr),
    .W0_en   (1'h1), // write port always enabled; W0_mask gates individual lanes
    .W0_clk  (W0_clk),
    .W0_data (W0_data),
    .W0_mask (W0_mask)
  );
endmodule |
Generate the Verilog code corresponding to this Chisel code package testchipip.serdes
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
/** Helpers for splitting TileLink channel bundles into their per-beat
  * ("body") and per-message ("const") field groups, and for computing the
  * minimum serialized-beat payload width.  The Seq order of each group is
  * the pack order used by the serializers — it must not change.
  */
trait TLFieldHelper {
  /** Fields that may change on every beat of a message. */
  def getBodyFields(b: TLChannel): Seq[Data] = b match {
    case a: TLBundleA => Seq(a.mask, a.data, a.corrupt)
    case x: TLBundleB => Seq(x.mask, x.data, x.corrupt)
    case c: TLBundleC => Seq(c.data, c.corrupt)
    case d: TLBundleD => Seq(d.data, d.corrupt)
    case _: TLBundleE => Nil
  }
  /** Fields that stay constant across all beats of a message. */
  def getConstFields(b: TLChannel): Seq[Data] = b match {
    case a: TLBundleA => Seq(a.opcode, a.param, a.size, a.source, a.address, a.user, a.echo)
    case x: TLBundleB => Seq(x.opcode, x.param, x.size, x.source, x.address)
    case c: TLBundleC => Seq(c.opcode, c.param, c.size, c.source, c.address, c.user, c.echo)
    case d: TLBundleD => Seq(d.opcode, d.param, d.size, d.source, d.user, d.echo, d.sink, d.denied)
    case e: TLBundleE => Seq(e.sink)
  }
  /** Minimum beat payload width for one channel: the wider of its two groups. */
  def minTLPayloadWidth(b: TLChannel): Int = {
    val bodyBits  = getBodyFields(b).map(_.getWidth).sum
    val constBits = getConstFields(b).map(_.getWidth).sum
    bodyBits max constBits
  }
  /** Minimum beat payload width covering several channels. */
  def minTLPayloadWidth(bs: Seq[TLChannel]): Int = bs.map(ch => minTLPayloadWidth(ch)).max
  /** Minimum beat payload width covering all five channels of a TL bundle. */
  def minTLPayloadWidth(b: TLBundle): Int =
    minTLPayloadWidth(Seq[TLChannel](b.a.bits, b.b.bits, b.c.bits, b.d.bits, b.e.bits))
}
// One serialized beat: an opaque packed payload plus head/tail framing
// flags marking the first and last beat of a TileLink message.
// NOTE: field declaration order fixes the Bundle bit layout — do not reorder.
class TLBeat(val beatWidth: Int) extends Bundle {
  val payload = UInt(beatWidth.W) // const fields on the head beat, body fields otherwise
  val head    = Bool()            // first beat of a message
  val tail    = Bool()            // last beat of a message
}
// Serializes one TileLink channel into a stream of TLBeats.  The first beat
// of each message carries the packed constant (per-message) fields; any
// following beats carry the packed body (per-beat) fields.  Subclasses must
// drive `has_body` to say whether the current message needs a body beat.
abstract class TLChannelToBeat[T <: TLChannel](gen: => T, edge: TLEdge, nameSuffix: Option[String])(implicit val p: Parameters) extends Module with TLFieldHelper {
  override def desiredName = (Seq(this.getClass.getSimpleName) ++ nameSuffix ++ Seq(gen.params.shortName)).mkString("_")
  val beatWidth = minTLPayloadWidth(gen)
  val io = IO(new Bundle {
    val protocol = Flipped(Decoupled(gen)) // incoming TL channel
    val beat = Decoupled(new TLBeat(beatWidth)) // outgoing serialized beats
  })
  def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
  // convert decoupled to irrevocable
  val q = Module(new Queue(gen, 1, pipe=true, flow=true))
  q.io.enq <> io.protocol
  val protocol = q.io.deq
  val has_body = Wire(Bool()) // driven by subclass: does this message carry body beats?
  val body_fields  = getBodyFields(protocol.bits)
  val const_fields = getConstFields(protocol.bits)
  val head = edge.first(protocol.bits, protocol.fire) // first TL beat of the message
  val tail = edge.last(protocol.bits, protocol.fire)  // last TL beat of the message
  // Pack each field group into a single UInt (zero-width fields dropped).
  val body  = Cat( body_fields.filter(_.getWidth > 0).map(_.asUInt))
  val const = Cat(const_fields.filter(_.getWidth > 0).map(_.asUInt))
  val is_body = RegInit(false.B) // false: next beat sends const; true: body phase
  io.beat.valid := protocol.valid
  // Only consume the protocol beat once its (optional) body beat has issued.
  protocol.ready := io.beat.ready && (is_body || !has_body)
  io.beat.bits.head := head && !is_body
  io.beat.bits.tail := tail && (is_body || !has_body)
  io.beat.bits.payload := Mux(is_body, body, const)
  // head beat fired -> enter body phase; tail beat fired -> message done.
  when (io.beat.fire && io.beat.bits.head) { is_body := true.B }
  when (io.beat.fire && io.beat.bits.tail) { is_body := false.B }
}
// Reassembles a TileLink channel from TLBeats (inverse of TLChannelToBeat).
// The head beat's payload holds the constant fields and is latched into
// const_reg; subsequent (body) beats are unpacked into the per-beat fields.
abstract class TLChannelFromBeat[T <: TLChannel](gen: => T, nameSuffix: Option[String])(implicit val p: Parameters) extends Module with TLFieldHelper {
  override def desiredName = (Seq(this.getClass.getSimpleName) ++ nameSuffix ++ Seq(gen.params.shortName)).mkString("_")
  val beatWidth = minTLPayloadWidth(gen)
  val io = IO(new Bundle {
    val protocol = Decoupled(gen) // reassembled TL channel out
    val beat = Flipped(Decoupled(new TLBeat(beatWidth))) // serialized beats in
  })
  // Handle size = 1 gracefully (Chisel3 empty range is broken)
  def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)
  val protocol = Wire(Decoupled(gen))
  io.protocol <> protocol
  val body_fields  = getBodyFields(protocol.bits)
  val const_fields = getConstFields(protocol.bits)
  val is_const = RegInit(true.B) // waiting for (or consuming) the head/const beat
  val const_reg = Reg(UInt(const_fields.map(_.getWidth).sum.W))
  // Use the incoming payload directly on a head beat (covers head+tail
  // single-beat messages, which never write const_reg before use).
  val const = Mux(io.beat.bits.head, io.beat.bits.payload, const_reg)
  // A head-only (non-tail) beat is absorbed without producing protocol output.
  io.beat.ready := (is_const && !io.beat.bits.tail) || protocol.ready
  protocol.valid := (!is_const || io.beat.bits.tail) && io.beat.valid
  // Unpack a packed UInt into the given fields; the LAST field in sigs takes
  // the lowest bits (sigs is walked in reverse while shifting right).
  def assign(i: UInt, sigs: Seq[Data]) = {
    var t = i
    for (s <- sigs.reverse) {
      s := t.asTypeOf(s.cloneType)
      t = t >> s.getWidth
    }
  }
  assign(const, const_fields)
  assign(io.beat.bits.payload, body_fields)
  when (io.beat.fire && io.beat.bits.head) { is_const := false.B; const_reg := io.beat.bits.payload }
  when (io.beat.fire && io.beat.bits.tail) { is_const := true.B }
}
// Per-channel concrete serializers/deserializers.
// A and B messages carry a body beat whenever they have data OR a
// non-all-ones mask; C, D, and E carry one only when the edge reports data.
class TLAToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleA(bundle), edgeIn, nameSuffix)(p) {
  has_body := edgeIn.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U)
}
class TLAFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleA(bundle), nameSuffix)(p) {
  // A head-only message carried no mask beat, so it implies a full mask.
  when (io.beat.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) }
}
class TLBToBeat(edgeOut: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleB(bundle), edgeOut, nameSuffix)(p) {
  has_body := edgeOut.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U)
}
class TLBFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleB(bundle), nameSuffix)(p) {
  // Same full-mask default as the A channel.
  when (io.beat.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) }
}
class TLCToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleC(bundle), edgeIn, nameSuffix)(p) {
  has_body := edgeIn.hasData(protocol.bits)
}
class TLCFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleC(bundle), nameSuffix)(p)
class TLDToBeat(edgeOut: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleD(bundle), edgeOut, nameSuffix)(p) {
  has_body := edgeOut.hasData(protocol.bits)
}
class TLDFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleD(bundle), nameSuffix)(p)
class TLEToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleE(bundle), edgeIn, nameSuffix)(p) {
  has_body := edgeIn.hasData(protocol.bits)
}
class TLEFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleE(bundle), nameSuffix)(p) | module TLCFromBeat_serial_tl_0_a64d64s8k8z8c(
  // Generated C-channel deserializer in a configuration where the protocol
  // payload wires were optimized away: only the head/tail beat-handshake
  // state machine (is_const) remains visible.
  input  clock,
  input  reset,
  output io_beat_ready,
  input  io_beat_valid,
  input  io_beat_bits_head,
  input  io_beat_bits_tail
);
  // is_const = 1 between messages / while absorbing the head (const) beat.
  reg  is_const;
  // Ready to absorb beats only while in the const phase and not on a tail.
  wire io_beat_ready_0 = is_const & ~io_beat_bits_tail;
  wire _GEN = io_beat_ready_0 & io_beat_valid; // beat handshake fires
  always @(posedge clock) begin
    if (reset)
      is_const <= 1'h1;
    else
      // tail beat returns to const phase; head beat leaves it; else hold.
      is_const <= _GEN & io_beat_bits_tail | ~(_GEN & io_beat_bits_head) & is_const;
  end
  assign io_beat_ready = io_beat_ready_0;
endmodule |
Generate the Verilog code corresponding to this Chisel code package boom.v3.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import boom.v3.common._
import boom.v3.util.{BoomCoreStringPrefix, MaskLower, WrapInc}
import scala.math.min
// Response from one TAGE component table for a single fetch-bank slot.
// NOTE: field declaration order fixes this Bundle's bit layout — do not reorder.
class TageResp extends Bundle {
  val ctr = UInt(3.W) // 3-bit saturating counter; MSB is the taken direction (weak values 3/4 defer to altpred in the bank)
  val u   = UInt(2.W) // 2-bit "useful" counter ({hi,lo} bit planes), guides entry allocation
}
// One TAGE component table with its own (folded) history length.
//
// Lookup pipeline: f1 presents pc/ghist and the hashed row index; the
// SyncReadMems return data in stage 2, where the partial tag is compared;
// f3_resp registers the per-slot hit/ctr/u outputs one cycle later.
//
// @param nRows      number of rows in the table SRAMs
// @param tagSz      partial-tag width stored per entry
// @param histLength bits of global history folded into the index/tag hashes
// @param uBitPeriod period (in cycles) of the background u-bit clearing sweep
class TageTable(val nRows: Int, val tagSz: Int, val histLength: Int, val uBitPeriod: Int)
  (implicit p: Parameters) extends BoomModule()(p)
  with HasBoomFrontendParameters
{
  require(histLength <= globalHistoryLength)
  val nWrBypassEntries = 2 // depth of the small write-bypass CAM below
  val io = IO( new Bundle {
    // f1 lookup request
    val f1_req_valid = Input(Bool())
    val f1_req_pc    = Input(UInt(vaddrBitsExtended.W))
    val f1_req_ghist = Input(UInt(globalHistoryLength.W))
    // f3 response: valid indicates a tag hit in this table
    val f3_resp = Output(Vec(bankWidth, Valid(new TageResp)))
    // Commit-time training interface, one slot per bank lane
    val update_mask    = Input(Vec(bankWidth, Bool()))
    val update_taken   = Input(Vec(bankWidth, Bool()))
    val update_alloc   = Input(Vec(bankWidth, Bool()))
    val update_old_ctr = Input(Vec(bankWidth, UInt(3.W)))
    val update_pc   = Input(UInt())
    val update_hist = Input(UInt())
    // Separate mask/value for the "useful" bit planes
    val update_u_mask = Input(Vec(bankWidth, Bool()))
    val update_u      = Input(Vec(bankWidth, UInt(2.W)))
  })
  // XOR-fold the low histLength bits of `hist` down to l bits.
  def compute_folded_hist(hist: UInt, l: Int) = {
    val nChunks = (histLength + l - 1) / l
    val hist_chunks = (0 until nChunks) map {i =>
      hist(min((i+1)*l, histLength)-1, i*l)
    }
    hist_chunks.reduce(_^_)
  }
  // Row index = fetch index XOR folded history; the partial tag comes from
  // the upper fetch-index bits XOR a differently-folded history.
  def compute_tag_and_hash(unhashed_idx: UInt, hist: UInt) = {
    val idx_history = compute_folded_hist(hist, log2Ceil(nRows))
    val idx = (unhashed_idx ^ idx_history)(log2Ceil(nRows)-1,0)
    val tag_history = compute_folded_hist(hist, tagSz)
    val tag = ((unhashed_idx >> log2Ceil(nRows)) ^ tag_history)(tagSz-1,0)
    (idx, tag)
  }
  // Saturating 3-bit counter update (clamps at 0 and 7).
  def inc_ctr(ctr: UInt, taken: Bool): UInt = {
    Mux(!taken, Mux(ctr === 0.U, 0.U, ctr - 1.U),
                Mux(ctr === 7.U, 7.U, ctr + 1.U))
  }
  // After reset, walk reset_idx through every row once, zeroing the table.
  val doing_reset = RegInit(true.B)
  val reset_idx = RegInit(0.U(log2Ceil(nRows).W))
  reset_idx := reset_idx + doing_reset
  when (reset_idx === (nRows-1).U) { doing_reset := false.B }
  class TageEntry extends Bundle {
    val valid = Bool() // TODO: Remove this valid bit
    val tag = UInt(tagSz.W)
    val ctr = UInt(3.W)
  }
  val tageEntrySz = 1 + tagSz + 3
  val (s1_hashed_idx, s1_tag) = compute_tag_and_hash(fetchIdx(io.f1_req_pc), io.f1_req_ghist)
  // Storage: hi/lo "useful" bit planes plus the main tagged-entry array.
  val hi_us = SyncReadMem(nRows, Vec(bankWidth, Bool()))
  val lo_us = SyncReadMem(nRows, Vec(bankWidth, Bool()))
  val table = SyncReadMem(nRows, Vec(bankWidth, UInt(tageEntrySz.W)))
  val mems = Seq((f"tage_l$histLength", nRows, bankWidth * tageEntrySz))
  // Stage-2 read data (SyncReadMem reads take one cycle) and tag compare.
  val s2_tag = RegNext(s1_tag)
  val s2_req_rtage = VecInit(table.read(s1_hashed_idx, io.f1_req_valid).map(_.asTypeOf(new TageEntry)))
  val s2_req_rhius = hi_us.read(s1_hashed_idx, io.f1_req_valid)
  val s2_req_rlous = lo_us.read(s1_hashed_idx, io.f1_req_valid)
  val s2_req_rhits = VecInit(s2_req_rtage.map(e => e.valid && e.tag === s2_tag && !doing_reset))
  for (w <- 0 until bankWidth) {
    // This bit indicates the TAGE table matched here
    io.f3_resp(w).valid    := RegNext(s2_req_rhits(w))
    io.f3_resp(w).bits.u   := RegNext(Cat(s2_req_rhius(w), s2_req_rlous(w)))
    io.f3_resp(w).bits.ctr := RegNext(s2_req_rtage(w).ctr)
  }
  // Free-running counter driving the periodic u-bit clearing sweep; its top
  // bit selects whether the hi or the lo plane is cleared on this pass.
  val clear_u_ctr = RegInit(0.U((log2Ceil(uBitPeriod) + log2Ceil(nRows) + 1).W))
  when (doing_reset) { clear_u_ctr := 1.U } .otherwise { clear_u_ctr := clear_u_ctr + 1.U }
  val doing_clear_u = clear_u_ctr(log2Ceil(uBitPeriod)-1,0) === 0.U
  val doing_clear_u_hi = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 1.U
  val doing_clear_u_lo = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 0.U
  val clear_u_idx = clear_u_ctr >> log2Ceil(uBitPeriod)
  val (update_idx, update_tag) = compute_tag_and_hash(fetchIdx(io.update_pc), io.update_hist)
  // Main-table write port: post-reset zeroing (all lanes) takes priority
  // over normal masked training writes.
  val update_wdata = Wire(Vec(bankWidth, new TageEntry))
  table.write(
    Mux(doing_reset, reset_idx                                          , update_idx),
    Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 0.U(tageEntrySz.W) }), VecInit(update_wdata.map(_.asUInt))),
    Mux(doing_reset, ~(0.U(bankWidth.W))                                , io.update_mask.asUInt).asBools
  )
  // u-bit hi plane: reset/clear-sweep writes zeros across all lanes,
  // otherwise a masked training write.
  val update_hi_wdata = Wire(Vec(bankWidth, Bool()))
  hi_us.write(
    Mux(doing_reset, reset_idx, Mux(doing_clear_u_hi, clear_u_idx, update_idx)),
    Mux(doing_reset || doing_clear_u_hi, VecInit((0.U(bankWidth.W)).asBools), update_hi_wdata),
    Mux(doing_reset || doing_clear_u_hi, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools
  )
  // u-bit lo plane: same structure as the hi plane, on the other sweep half.
  val update_lo_wdata = Wire(Vec(bankWidth, Bool()))
  lo_us.write(
    Mux(doing_reset, reset_idx, Mux(doing_clear_u_lo, clear_u_idx, update_idx)),
    Mux(doing_reset || doing_clear_u_lo, VecInit((0.U(bankWidth.W)).asBools), update_lo_wdata),
    Mux(doing_reset || doing_clear_u_lo, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools
  )
  // Write-bypass CAM: forwards recently-written counters for the same
  // (idx, tag) so back-to-back updates train on fresh values.
  val wrbypass_tags = Reg(Vec(nWrBypassEntries, UInt(tagSz.W)))
  val wrbypass_idxs = Reg(Vec(nWrBypassEntries, UInt(log2Ceil(nRows).W)))
  val wrbypass     = Reg(Vec(nWrBypassEntries, Vec(bankWidth, UInt(3.W))))
  val wrbypass_enq_idx = RegInit(0.U(log2Ceil(nWrBypassEntries).W))
  val wrbypass_hits = VecInit((0 until nWrBypassEntries) map { i =>
    !doing_reset &&
    wrbypass_tags(i) === update_tag &&
    wrbypass_idxs(i) === update_idx
  })
  val wrbypass_hit = wrbypass_hits.reduce(_||_)
  val wrbypass_hit_idx = PriorityEncoder(wrbypass_hits)
  for (w <- 0 until bankWidth) {
    // New allocations start weakly biased: 4 = weak taken, 3 = weak not-taken.
    // Otherwise bump the bypassed (or provided) old counter toward the outcome.
    update_wdata(w).ctr := Mux(io.update_alloc(w),
      Mux(io.update_taken(w), 4.U,
        3.U
      ),
      Mux(wrbypass_hit, inc_ctr(wrbypass(wrbypass_hit_idx)(w), io.update_taken(w)),
        inc_ctr(io.update_old_ctr(w), io.update_taken(w))
      )
    )
    update_wdata(w).valid := true.B
    update_wdata(w).tag   := update_tag
    update_hi_wdata(w) := io.update_u(w)(1)
    update_lo_wdata(w) := io.update_u(w)(0)
  }
  // Maintain the bypass CAM: refresh a hit entry in place, otherwise enqueue
  // round-robin.
  when (io.update_mask.reduce(_||_)) {
    when (wrbypass_hits.reduce(_||_)) {
      wrbypass(wrbypass_hit_idx) := VecInit(update_wdata.map(_.ctr))
    } .otherwise {
      wrbypass     (wrbypass_enq_idx) := VecInit(update_wdata.map(_.ctr))
      wrbypass_tags(wrbypass_enq_idx) := update_tag
      wrbypass_idxs(wrbypass_enq_idx) := update_idx
      wrbypass_enq_idx := WrapInc(wrbypass_enq_idx, nWrBypassEntries)
    }
  }
}
// Configuration for the TAGE predictor: one (nRows, histLength, tagSz)
// tuple per component table (history lengths form a geometric-ish series),
// plus the u-bit clearing period shared by all tables.
case class BoomTageParams(
  //                                          nSets, histLen, tagSz
  tableInfo: Seq[Tuple3[Int, Int, Int]] = Seq(( 128, 2, 7),
                                              ( 128, 4, 7),
                                              ( 256, 8, 8),
                                              ( 256, 16, 8),
                                              ( 128, 32, 9),
                                              ( 128, 64, 9)),
  uBitPeriod: Int = 2048 // cycles between u-bit clearing sweeps
)
// TAGE branch predictor bank.  Instantiates one TageTable per entry of
// params.tableInfo, each trained on a different global-history length.
// Prediction (f3): among hitting tables, the last one scanned provides the
// prediction (tables are scanned in index order; by convention later tables
// use longer histories).  Update (s1, registered into the tables): train the
// provider, adjust its usefulness counter, and on a mispredict try to
// allocate an entry in a table beyond the provider.
class TageBranchPredictorBank(params: BoomTageParams = BoomTageParams())(implicit p: Parameters) extends BranchPredictorBank()(p)
{
  val tageUBitPeriod = params.uBitPeriod
  val tageNTables = params.tableInfo.size

  // Prediction metadata carried through the pipeline and handed back at
  // update time via s1_update.bits.meta.
  class TageMeta extends Bundle
  {
    val provider     = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W))) // table that supplied the prediction
    val alt_differs  = Vec(bankWidth, Output(Bool()))                       // alternate prediction differed from final
    val provider_u   = Vec(bankWidth, Output(UInt(2.W)))                    // provider's usefulness counter
    val provider_ctr = Vec(bankWidth, Output(UInt(3.W)))                    // provider's direction counter
    val allocate     = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W))) // allocation candidate on mispredict
  }

  val f3_meta = Wire(new TageMeta)
  override val metaSz = f3_meta.asUInt.getWidth
  require(metaSz <= bpdMaxMetaLength)

  // Usefulness-counter update: unchanged unless the alternate prediction
  // differed; then saturating-decrement on mispredict, else saturating-increment.
  def inc_u(u: UInt, alt_differs: Bool, mispredict: Bool): UInt = {
    Mux(!alt_differs, u,
    Mux(mispredict, Mux(u === 0.U, 0.U, u - 1.U),
                    Mux(u === 3.U, 3.U, u + 1.U)))
  }

  // One TageTable per (nSets, histLen, tagSz) config; all tables see the same
  // registered f1 request.
  val tt = params.tableInfo map {
    case (n, l, s) => {
      val t = Module(new TageTable(n, s, l, params.uBitPeriod))
      t.io.f1_req_valid := RegNext(io.f0_valid)
      t.io.f1_req_pc    := RegNext(io.f0_pc)
      t.io.f1_req_ghist := io.f1_ghist
      (t, t.mems)
    }
  }
  val tables = tt.map(_._1)
  val mems = tt.map(_._2).flatten

  val f3_resps = VecInit(tables.map(_.io.f3_resp))

  val s1_update_meta = s1_update.bits.meta.asTypeOf(new TageMeta)
  // One-hot mask of the mispredicted CFI slot (all zeros if no mispredict).
  val s1_update_mispredict_mask = UIntToOH(s1_update.bits.cfi_idx.bits) &
    Fill(bankWidth, s1_update.bits.cfi_mispredicted)

  // Per-table, per-slot update commands; masks default to "no update".
  val s1_update_mask   = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, Bool()))))
  val s1_update_u_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, UInt(1.W)))))

  val s1_update_taken   = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
  val s1_update_old_ctr = Wire(Vec(tageNTables, Vec(bankWidth, UInt(3.W))))
  val s1_update_alloc   = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
  val s1_update_u       = Wire(Vec(tageNTables, Vec(bankWidth, UInt(2.W))))

  // Payload wires are only meaningful where the corresponding mask is set.
  s1_update_taken   := DontCare
  s1_update_old_ctr := DontCare
  s1_update_alloc   := DontCare
  s1_update_u       := DontCare

  for (w <- 0 until bankWidth) {
    // Scan tables in index order; the last hit becomes the provider, while
    // `altpred` tracks the prediction of the next-best source seen so far.
    var altpred = io.resp_in(0).f3(w).taken
    val final_altpred = WireInit(io.resp_in(0).f3(w).taken)
    var provided = false.B
    var provider = 0.U
    io.resp.f3(w).taken := io.resp_in(0).f3(w).taken

    for (i <- 0 until tageNTables) {
      val hit = f3_resps(i)(w).valid
      val ctr = f3_resps(i)(w).bits.ctr
      when (hit) {
        // Weak counter values (3/4) defer to the alternate prediction;
        // otherwise the counter's sign bit decides.
        io.resp.f3(w).taken := Mux(ctr === 3.U || ctr === 4.U, altpred, ctr(2))
        final_altpred := altpred
      }
      provided = provided || hit
      provider = Mux(hit, i.U, provider)
      altpred  = Mux(hit, f3_resps(i)(w).bits.ctr(2), altpred)
    }
    f3_meta.provider(w).valid := provided
    f3_meta.provider(w).bits  := provider
    f3_meta.alt_differs(w)    := final_altpred =/= io.resp.f3(w).taken
    f3_meta.provider_u(w)     := f3_resps(provider)(w).bits.u
    f3_meta.provider_ctr(w)   := f3_resps(provider)(w).bits.ctr

    // Create a mask of tables which did not hit our query, and also contain useless entries
    // and also uses a longer history than the provider
    val allocatable_slots = (
      VecInit(f3_resps.map(r => !r(w).valid && r(w).bits.u === 0.U)).asUInt &
      ~(MaskLower(UIntToOH(provider)) & Fill(tageNTables, provided))
    )
    // Randomize the allocation choice among eligible tables; fall back to the
    // first eligible one when the LFSR-masked pick is not allocatable.
    val alloc_lfsr = random.LFSR(tageNTables max 2)

    val first_entry  = PriorityEncoder(allocatable_slots)
    val masked_entry = PriorityEncoder(allocatable_slots & alloc_lfsr)
    val alloc_entry  = Mux(allocatable_slots(masked_entry),
      masked_entry,
      first_entry)

    f3_meta.allocate(w).valid := allocatable_slots =/= 0.U
    f3_meta.allocate(w).bits  := alloc_entry

    val update_was_taken = (s1_update.bits.cfi_idx.valid &&
                           (s1_update.bits.cfi_idx.bits === w.U) &&
                            s1_update.bits.cfi_taken)
    // Train the provider table for committed branches in this slot.
    when (s1_update.bits.br_mask(w) && s1_update.valid && s1_update.bits.is_commit_update) {
      when (s1_update_meta.provider(w).valid) {
        val provider = s1_update_meta.provider(w).bits

        s1_update_mask(provider)(w)   := true.B
        s1_update_u_mask(provider)(w) := true.B

        val new_u = inc_u(s1_update_meta.provider_u(w),
                          s1_update_meta.alt_differs(w),
                          s1_update_mispredict_mask(w))
        s1_update_u      (provider)(w) := new_u
        s1_update_taken  (provider)(w) := update_was_taken
        s1_update_old_ctr(provider)(w) := s1_update_meta.provider_ctr(w)
        s1_update_alloc  (provider)(w) := false.B
      }
    }
  }

  // On a committed mispredict: allocate a fresh entry (u := 0) in the table
  // chosen at predict time, or, if none was allocatable, clear the u counters
  // of every table beyond the provider so an entry frees up later.
  when (s1_update.valid && s1_update.bits.is_commit_update && s1_update.bits.cfi_mispredicted && s1_update.bits.cfi_idx.valid) {
    val idx = s1_update.bits.cfi_idx.bits
    val allocate = s1_update_meta.allocate(idx)
    when (allocate.valid) {
      s1_update_mask (allocate.bits)(idx) := true.B
      s1_update_taken(allocate.bits)(idx) := s1_update.bits.cfi_taken
      s1_update_alloc(allocate.bits)(idx) := true.B

      s1_update_u_mask(allocate.bits)(idx) := true.B
      s1_update_u     (allocate.bits)(idx) := 0.U
    } .otherwise {
      val provider = s1_update_meta.provider(idx)
      val decr_mask = Mux(provider.valid, ~MaskLower(UIntToOH(provider.bits)), 0.U)

      for (i <- 0 until tageNTables) {
        when (decr_mask(i)) {
          s1_update_u_mask(i)(idx) := true.B
          s1_update_u     (i)(idx) := 0.U
        }
      }
    }
  }

  // Register the update commands into the tables (one cycle after s1).
  for (i <- 0 until tageNTables) {
    for (w <- 0 until bankWidth) {
      tables(i).io.update_mask(w)    := RegNext(s1_update_mask(i)(w))
      tables(i).io.update_taken(w)   := RegNext(s1_update_taken(i)(w))
      tables(i).io.update_alloc(w)   := RegNext(s1_update_alloc(i)(w))
      tables(i).io.update_old_ctr(w) := RegNext(s1_update_old_ctr(i)(w))

      tables(i).io.update_u_mask(w)  := RegNext(s1_update_u_mask(i)(w))
      tables(i).io.update_u(w)       := RegNext(s1_update_u(i)(w))
    }
    tables(i).io.update_pc   := RegNext(s1_update.bits.pc)
    tables(i).io.update_hist := RegNext(s1_update.bits.ghist)
  }

  //io.f3_meta := Cat(f3_meta.asUInt, micro.io.f3_meta(micro.metaSz-1,0), base.io.f3_meta(base.metaSz-1, 0))
  io.f3_meta := f3_meta.asUInt
} | module table_1(
input [7:0] R0_addr,
input R0_en,
input R0_clk,
output [47:0] R0_data,
input [7:0] W0_addr,
input W0_clk,
input [47:0] W0_data,
input [3:0] W0_mask
);
table_0_ext table_0_ext (
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (1'h1),
.W0_clk (W0_clk),
.W0_data (W0_data),
.W0_mask (W0_mask)
);
endmodule |
Generate the Verilog code corresponding to this Chisel code /*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.util._
// Sizing parameters for a ListBuffer: `queues` independent linked lists
// sharing a pool of `entries` storage slots; `bypass` makes a push visible on
// the peek/pop port in the same cycle.
case class ListBufferParameters[T <: Data](gen: T, queues: Int, entries: Int, bypass: Boolean)
{
  val queueBits = log2Up(queues)  // bits needed to name a queue
  val entryBits = log2Up(entries) // bits needed to name a storage slot
}
// Push request into a ListBuffer: enqueue `data` at the tail of queue `index`.
class ListBufferPush[T <: Data](params: ListBufferParameters[T]) extends Bundle
{
  val index = UInt(params.queueBits.W)
  val data  = Output(params.gen)
}
// A set of `queues` FIFO linked lists sharing one pool of `entries` data
// slots.  `head`/`tail` give each queue's first/last slot index and `next`
// chains slots within a queue.  `io.pop` dequeues the head of the named
// queue; `io.data` continuously shows that queue's head data.
class ListBuffer[T <: Data](params: ListBufferParameters[T]) extends Module
{
  override def desiredName = s"ListBuffer_${params.gen.typeName}_q${params.queues}_e${params.entries}"
  val io = IO(new Bundle {
    // push is visible on the same cycle; flow queues
    val push = Flipped(Decoupled(new ListBufferPush(params)))
    val valid = UInt(params.queues.W)                 // per-queue non-empty bitmap
    val pop = Flipped(Valid(UInt(params.queueBits.W)))
    val data = Output(params.gen)                     // head data of queue io.pop.bits
  })

  val valid = RegInit(0.U(params.queues.W))
  val head  = Mem(params.queues, UInt(params.entryBits.W))
  val tail  = Mem(params.queues, UInt(params.entryBits.W))
  val used  = RegInit(0.U(params.entries.W))          // storage-slot occupancy bitmap
  val next  = Mem(params.entries, UInt(params.entryBits.W))
  val data  = Mem(params.entries, params.gen)

  // One-hot select (and binary index) of a free storage slot.
  val freeOH  = ~(leftOR(~used) << 1) & ~used
  val freeIdx = OHToUInt(freeOH)

  // Set/clear requests applied to `valid`/`used` at the end of the cycle.
  val valid_set = WireDefault(0.U(params.queues.W))
  val valid_clr = WireDefault(0.U(params.queues.W))
  val used_set  = WireDefault(0.U(params.entries.W))
  val used_clr  = WireDefault(0.U(params.entries.W))

  val push_tail  = tail.read(io.push.bits.index)
  val push_valid = valid(io.push.bits.index)

  // A push is accepted whenever any storage slot is free.
  io.push.ready := !used.andR
  when (io.push.fire) {
    valid_set := UIntToOH(io.push.bits.index, params.queues)
    used_set := freeOH
    data.write(freeIdx, io.push.bits.data)
    when (push_valid) {
      next.write(push_tail, freeIdx)          // append behind the current tail
    } .otherwise {
      head.write(io.push.bits.index, freeIdx) // queue was empty: new head
    }
    tail.write(io.push.bits.index, freeIdx)
  }

  val pop_head  = head.read(io.pop.bits)
  val pop_valid = valid(io.pop.bits)

  // Bypass push data to the peek port
  io.data := (if (!params.bypass) data.read(pop_head) else Mux(!pop_valid, io.push.bits.data, data.read(pop_head)))
  io.valid := (if (!params.bypass) valid else (valid | valid_set))

  // It is an error to pop something that is not valid
  assert (!io.pop.fire || (io.valid)(io.pop.bits))

  when (io.pop.fire) {
    used_clr := UIntToOH(pop_head, params.entries)
    when (pop_head === tail.read(io.pop.bits)) {
      valid_clr := UIntToOH(io.pop.bits, params.queues) // popped the last entry
    }
    // Advance the head pointer; if a same-cycle push appended behind this
    // head, the new head is the freshly allocated slot.
    head.write(io.pop.bits, Mux(io.push.fire && push_valid && push_tail === pop_head, freeIdx, next.read(pop_head)))
  }

  // Empty bypass changes no state
  when ((!params.bypass).B || !io.pop.valid || pop_valid) {
    used  := (used  & ~used_clr)  | used_set
    valid := (valid & ~valid_clr) | valid_set
  }
} | module head_21x6(
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [5:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [5:0] W0_data,
input [4:0] W1_addr,
input W1_en,
input W1_clk,
input [5:0] W1_data
);
reg [5:0] Memory[0:20];
always @(posedge W0_clk) begin
if (W0_en & 1'h1)
Memory[W0_addr] <= W0_data;
if (W1_en & 1'h1)
Memory[W1_addr] <= W1_data;
end
assign R0_data = R0_en ? Memory[R0_addr] : 6'bx;
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tile
import chisel3._
import chisel3.util._
import chisel3.{DontCare, WireInit, withClock, withReset}
import chisel3.experimental.SourceInfo
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.rocket._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property
// Static FPU configuration: supported precisions (minFLen..fLen in bits),
// presence of a divide/sqrt unit, and pipeline latencies of the functional
// units (single/double FMA, FP-move pipe, int->FP pipe).
case class FPUParams(
  minFLen: Int = 32,
  fLen: Int = 64,
  divSqrt: Boolean = true,
  sfmaLatency: Int = 3,
  dfmaLatency: Int = 4,
  fpmuLatency: Int = 2,
  ifpuLatency: Int = 2
)
// Field widths shared across the FPU.
object FPConstants
{
  val RM_SZ = 3     // rounding-mode field width
  val FLAGS_SZ = 5  // exception-flag (fflags) field width
}
// Decoded FPU control signals — one field per column of the FPUDecoder
// decode tables (see the `sigs` Seq in FPUDecoder for the exact order).
trait HasFPUCtrlSigs {
  val ldst = Bool()
  val wen = Bool()
  val ren1 = Bool()
  val ren2 = Bool()
  val ren3 = Bool()
  val swap12 = Bool()
  val swap23 = Bool()
  val typeTagIn = UInt(2.W)   // operand format tag (H/S/D or I)
  val typeTagOut = UInt(2.W)  // result format tag
  val fromint = Bool()
  val toint = Bool()
  val fastpipe = Bool()
  val fma = Bool()
  val div = Bool()
  val sqrt = Bool()
  val wflags = Bool()         // instruction may update exception flags
  val vec = Bool()            // vector-unit instruction (e.g. VFMV_F_S)
}

// Hardware bundle form of the control signals.
class FPUCtrlSigs extends Bundle with HasFPUCtrlSigs
// Decodes a 32-bit FP instruction into the FPUCtrlSigs bundle.  One decode
// table exists per precision (half/single/double); the active set is chosen
// by the (minFLen, fLen) configuration, plus the vector FMV table when the
// vector unit is enabled.
class FPUDecoder(implicit p: Parameters) extends FPUModule()(p) {
  val io = IO(new Bundle {
    val inst = Input(Bits(32.W))
    val sigs = Output(new FPUCtrlSigs())
  })

  private val X2 = BitPat.dontCare(2)

  // Column order matches the `sigs` Seq at the bottom of this class:
  // ldst wen ren1 ren2 ren3 swap12 swap23 typeTagIn typeTagOut fromint toint
  // fastpipe fma div sqrt wflags vec
  val default = List(X,X,X,X,X,X,X,X2,X2,X,X,X,X,X,X,X,N)

  // Half-precision decode table.
  val h: Array[(BitPat, List[BitPat])] =
    Array(FLH      -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
          FSH      -> List(Y,N,N,Y,N,Y,X, I, H,N,Y,N,N,N,N,N,N),
          FMV_H_X  -> List(N,Y,N,N,N,X,X, H, I,Y,N,N,N,N,N,N,N),
          FCVT_H_W -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
          FCVT_H_WU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
          FCVT_H_L -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
          FCVT_H_LU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
          FMV_X_H  -> List(N,N,Y,N,N,N,X, I, H,N,Y,N,N,N,N,N,N),
          FCLASS_H -> List(N,N,Y,N,N,N,X, H, H,N,Y,N,N,N,N,N,N),
          FCVT_W_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
          FCVT_WU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
          FCVT_L_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
          FCVT_LU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
          FCVT_S_H -> List(N,Y,Y,N,N,N,X, H, S,N,N,Y,N,N,N,Y,N),
          FCVT_H_S -> List(N,Y,Y,N,N,N,X, S, H,N,N,Y,N,N,N,Y,N),
          FEQ_H    -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
          FLT_H    -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
          FLE_H    -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
          FSGNJ_H  -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
          FSGNJN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
          FSGNJX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
          FMIN_H   -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
          FMAX_H   -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
          FADD_H   -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
          FSUB_H   -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
          FMUL_H   -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,Y,N,N,Y,N),
          FMADD_H  -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
          FMSUB_H  -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
          FNMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
          FNMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
          FDIV_H   -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,N,Y,N,Y,N),
          FSQRT_H  -> List(N,Y,Y,N,N,N,X, H, H,N,N,N,N,N,Y,Y,N))

  // Single-precision decode table.
  val f: Array[(BitPat, List[BitPat])] =
    Array(FLW      -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
          FSW      -> List(Y,N,N,Y,N,Y,X, I, S,N,Y,N,N,N,N,N,N),
          FMV_W_X  -> List(N,Y,N,N,N,X,X, S, I,Y,N,N,N,N,N,N,N),
          FCVT_S_W -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
          FCVT_S_WU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
          FCVT_S_L -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
          FCVT_S_LU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
          FMV_X_W  -> List(N,N,Y,N,N,N,X, I, S,N,Y,N,N,N,N,N,N),
          FCLASS_S -> List(N,N,Y,N,N,N,X, S, S,N,Y,N,N,N,N,N,N),
          FCVT_W_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
          FCVT_WU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
          FCVT_L_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
          FCVT_LU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
          FEQ_S    -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
          FLT_S    -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
          FLE_S    -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
          FSGNJ_S  -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
          FSGNJN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
          FSGNJX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
          FMIN_S   -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
          FMAX_S   -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
          FADD_S   -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
          FSUB_S   -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
          FMUL_S   -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,Y,N,N,Y,N),
          FMADD_S  -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
          FMSUB_S  -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
          FNMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
          FNMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
          FDIV_S   -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,N,Y,N,Y,N),
          FSQRT_S  -> List(N,Y,Y,N,N,N,X, S, S,N,N,N,N,N,Y,Y,N))

  // Double-precision decode table.
  val d: Array[(BitPat, List[BitPat])] =
    Array(FLD      -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
          FSD      -> List(Y,N,N,Y,N,Y,X, I, D,N,Y,N,N,N,N,N,N),
          FMV_D_X  -> List(N,Y,N,N,N,X,X, D, I,Y,N,N,N,N,N,N,N),
          FCVT_D_W -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
          FCVT_D_WU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
          FCVT_D_L -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
          FCVT_D_LU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
          FMV_X_D  -> List(N,N,Y,N,N,N,X, I, D,N,Y,N,N,N,N,N,N),
          FCLASS_D -> List(N,N,Y,N,N,N,X, D, D,N,Y,N,N,N,N,N,N),
          FCVT_W_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
          FCVT_WU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
          FCVT_L_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
          FCVT_LU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
          FCVT_S_D -> List(N,Y,Y,N,N,N,X, D, S,N,N,Y,N,N,N,Y,N),
          FCVT_D_S -> List(N,Y,Y,N,N,N,X, S, D,N,N,Y,N,N,N,Y,N),
          FEQ_D    -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
          FLT_D    -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
          FLE_D    -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
          FSGNJ_D  -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
          FSGNJN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
          FSGNJX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
          FMIN_D   -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
          FMAX_D   -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
          FADD_D   -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
          FSUB_D   -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
          FMUL_D   -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,Y,N,N,Y,N),
          FMADD_D  -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
          FMSUB_D  -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
          FNMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
          FNMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
          FDIV_D   -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,N,Y,N,Y,N),
          FSQRT_D  -> List(N,Y,Y,N,N,N,X, D, D,N,N,N,N,N,Y,Y,N))

  // Half <-> double conversions (only meaningful when both are enabled).
  val fcvt_hd: Array[(BitPat, List[BitPat])] =
    Array(FCVT_H_D -> List(N,Y,Y,N,N,N,X, D, H,N,N,Y,N,N,N,Y,N),
          FCVT_D_H -> List(N,Y,Y,N,N,N,X, H, D,N,N,Y,N,N,N,Y,N))

  // Vector move-to-scalar.
  val vfmv_f_s: Array[(BitPat, List[BitPat])] =
    Array(VFMV_F_S -> List(N,Y,N,N,N,N,X,X2,X2,N,N,N,N,N,N,N,Y))

  // Select the decode tables for this configuration.
  val insns = ((minFLen, fLen) match {
    case (32, 32) => f
    case (16, 32) => h ++ f
    case (32, 64) => f ++ d
    case (16, 64) => h ++ f ++ d ++ fcvt_hd
    case other => throw new Exception(s"minFLen = ${minFLen} & fLen = ${fLen} is an unsupported configuration")
  }) ++ (if (usingVector) vfmv_f_s else Array[(BitPat, List[BitPat])]())
  val decoder = DecodeLogic(io.inst, default, insns)
  val s = io.sigs
  // Output order must match the table column order above.
  val sigs = Seq(s.ldst, s.wen, s.ren1, s.ren2, s.ren3, s.swap12,
                 s.swap23, s.typeTagIn, s.typeTagOut, s.fromint, s.toint,
                 s.fastpipe, s.fma, s.div, s.sqrt, s.wflags, s.vec)
  sigs zip decoder map {case(s,d) => s := d}
}
// Core <-> FPU interface: instruction/operand delivery from the core,
// late-writeback (ll_resp_*) port, fcsr state, and scoreboard handshaking.
class FPUCoreIO(implicit p: Parameters) extends CoreBundle()(p) {
  val hartid = Input(UInt(hartIdLen.W))
  val time = Input(UInt(xLen.W))

  val inst = Input(Bits(32.W))
  val fromint_data = Input(Bits(xLen.W))   // integer operand for from-int ops

  val fcsr_rm = Input(Bits(FPConstants.RM_SZ.W))       // dynamic rounding mode
  val fcsr_flags = Valid(Bits(FPConstants.FLAGS_SZ.W)) // accrued exception flags

  val v_sew = Input(UInt(3.W))

  val store_data = Output(Bits(fLen.W))
  val toint_data = Output(Bits(xLen.W))

  // Late-arriving response port (presumably load data / long-latency
  // writeback; tag selects the destination — confirm against the core).
  val ll_resp_val = Input(Bool())
  val ll_resp_type = Input(Bits(3.W))
  val ll_resp_tag = Input(UInt(5.W))
  val ll_resp_data = Input(Bits(fLen.W))

  val valid = Input(Bool())
  val fcsr_rdy = Output(Bool())
  val nack_mem = Output(Bool())
  val illegal_rm = Output(Bool())
  val killx = Input(Bool())   // kill the instruction in the x stage
  val killm = Input(Bool())   // kill the instruction in the m stage
  val dec = Output(new FPUCtrlSigs())
  // Scoreboard set/clear for long-latency FP writebacks.
  val sboard_set = Output(Bool())
  val sboard_clr = Output(Bool())
  val sboard_clra = Output(UInt(5.W))

  val keep_clock_enabled = Input(Bool())
}
// FPUCoreIO extended with a coprocessor request/response port.
class FPUIO(implicit p: Parameters) extends FPUCoreIO ()(p) {
  val cp_req = Flipped(Decoupled(new FPInput())) //cp doesn't pay attn to kill sigs
  val cp_resp = Decoupled(new FPResult())
}
// Result of an FP functional unit: recoded (fLen+1)-bit value plus the
// exception flags it raised.
class FPResult(implicit p: Parameters) extends CoreBundle()(p) {
  val data = Bits((fLen+1).W)
  val exc = Bits(FPConstants.FLAGS_SZ.W)
}
// Request into the int->FP unit: decoded control signals, rounding mode,
// integer-type selector, and the xLen-bit integer operand.
class IntToFPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
  val rm = Bits(FPConstants.RM_SZ.W)
  val typ = Bits(2.W)
  val in1 = Bits(xLen.W)
}
// Request into an FP functional unit: decoded control signals plus up to
// three recoded (fLen+1)-bit operands.
class FPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
  val rm = Bits(FPConstants.RM_SZ.W)
  val fmaCmd = Bits(2.W)
  val typ = Bits(2.W)
  val fmt = Bits(2.W)
  val in1 = Bits((fLen+1).W)
  val in2 = Bits((fLen+1).W)
  val in3 = Bits((fLen+1).W)
}
// One floating-point format described by its exponent and significand widths
// (e.g. FType(8, 24) = IEEE single).  Most helpers operate on hardfloat's
// recoded representation, which is one bit wider than the IEEE encoding.
case class FType(exp: Int, sig: Int) {
  def ieeeWidth = exp + sig
  def recodedWidth = ieeeWidth + 1

  // Canonical quiet NaN in the IEEE and recoded encodings, respectively.
  def ieeeQNaN = ((BigInt(1) << (ieeeWidth - 1)) - (BigInt(1) << (sig - 2))).U(ieeeWidth.W)
  def qNaN = ((BigInt(7) << (exp + sig - 3)) + (BigInt(1) << (sig - 2))).U(recodedWidth.W)
  // Recoded NaN <=> the top three exponent bits are all ones.
  def isNaN(x: UInt) = x(sig + exp - 1, sig + exp - 3).andR
  def isSNaN(x: UInt) = isNaN(x) && !x(sig - 2)

  // 10-bit classification mask of a recoded value (RISC-V FCLASS bit order:
  // bit 0 = -inf ... bit 9 = quiet NaN).
  def classify(x: UInt) = {
    val sign = x(sig + exp)
    val code = x(exp + sig - 1, exp + sig - 3)
    val codeHi = code(2, 1)
    val isSpecial = codeHi === 3.U

    val isHighSubnormalIn = x(exp + sig - 3, sig - 1) < 2.U
    val isSubnormal = code === 1.U || codeHi === 1.U && isHighSubnormalIn
    val isNormal = codeHi === 1.U && !isHighSubnormalIn || codeHi === 2.U
    val isZero = code === 0.U
    val isInf = isSpecial && !code(0)
    val isNaN = code.andR
    val isSNaN = isNaN && !x(sig-2)
    val isQNaN = isNaN && x(sig-2)

    Cat(isQNaN, isSNaN, isInf && !sign, isNormal && !sign,
        isSubnormal && !sign, isZero && !sign, isZero && sign,
        isSubnormal && sign, isNormal && sign, isInf && sign)
  }

  // convert between formats, ignoring rounding, range, NaN
  def unsafeConvert(x: UInt, to: FType) = if (this == to) x else {
    val sign = x(sig + exp)
    val fractIn = x(sig - 2, 0)
    val expIn = x(sig + exp - 1, sig - 1)
    // Shift the fraction into the destination's significand width.
    val fractOut = fractIn << to.sig >> sig
    val expOut = {
      val expCode = expIn(exp, exp - 2)
      // Re-bias the exponent; special codes (zero/inf/NaN) pass through.
      val commonCase = (expIn + (1 << to.exp).U) - (1 << exp).U
      Mux(expCode === 0.U || expCode >= 6.U, Cat(expCode, commonCase(to.exp - 3, 0)), commonCase(to.exp, 0))
    }
    Cat(sign, expOut, fractOut)
  }

  // View of the IEEE encoding as (sign, exp, sig) fields.
  private def ieeeBundle = {
    val expWidth = exp
    class IEEEBundle extends Bundle {
      val sign = Bool()
      val exp = UInt(expWidth.W)
      val sig = UInt((ieeeWidth-expWidth-1).W)
    }
    new IEEEBundle
  }
  def unpackIEEE(x: UInt) = x.asTypeOf(ieeeBundle)

  // IEEE <-> recoded conversions (hardfloat helpers).
  def recode(x: UInt) = hardfloat.recFNFromFN(exp, sig, x)
  def ieee(x: UInt) = hardfloat.fNFromRecFN(exp, sig, x)
}
// The floating-point formats this FPU knows about: half, single, double.
object FType {
  val H = new FType(5, 11)
  val S = new FType(8, 24)
  val D = new FType(11, 53)

  val all = List(H, S, D)
}
// Derived FPU parameters plus the NaN-boxing/recoding helpers shared by the
// FPU datapath.  Narrower floats are held in wider registers "NaN-boxed":
// the upper bits form a NaN pattern whose redundant bits carry parts of the
// narrow value; these helpers box, unbox, and validate that encoding.
trait HasFPUParameters {
  require(fLen == 0 || FType.all.exists(_.ieeeWidth == fLen))
  val minFLen: Int
  val fLen: Int
  def xLen: Int
  val minXLen = 32
  // Number of integer widths supported for conversions (32, 64, ...).
  val nIntTypes = log2Ceil(xLen/minXLen) + 1
  // Enabled FP formats, narrowest first; `typeTag` is an index into this list.
  def floatTypes = FType.all.filter(t => minFLen <= t.ieeeWidth && t.ieeeWidth <= fLen)
  def minType = floatTypes.head
  def maxType = floatTypes.last
  def prevType(t: FType) = floatTypes(typeTag(t) - 1)
  def maxExpWidth = maxType.exp
  def maxSigWidth = maxType.sig
  def typeTag(t: FType) = floatTypes.indexOf(t)
  def typeTagWbOffset = (FType.all.indexOf(minType) + 1).U
  def typeTagGroup(t: FType) = (if (floatTypes.contains(t)) typeTag(t) else typeTag(maxType)).U
  // typeTag
  def H = typeTagGroup(FType.H)
  def S = typeTagGroup(FType.S)
  def D = typeTagGroup(FType.D)
  def I = typeTag(maxType).U
  // True if x's upper bits form a valid NaN box for format t.
  private def isBox(x: UInt, t: FType): Bool = x(t.sig + t.exp, t.sig + t.exp - 4).andR
  // Embed narrow value y (format yt) inside the NaN pattern of x (format xt).
  private def box(x: UInt, xt: FType, y: UInt, yt: FType): UInt = {
    require(xt.ieeeWidth == 2 * yt.ieeeWidth)
    val swizzledNaN = Cat(
      x(xt.sig + xt.exp, xt.sig + xt.exp - 3),
      x(xt.sig - 2, yt.recodedWidth - 1).andR,
      x(xt.sig + xt.exp - 5, xt.sig),
      y(yt.recodedWidth - 2),
      x(xt.sig - 2, yt.recodedWidth - 1),
      y(yt.recodedWidth - 1),
      y(yt.recodedWidth - 3, 0))
    Mux(xt.isNaN(x), swizzledNaN, x)
  }
  // implement NaN unboxing for FU inputs
  def unbox(x: UInt, tag: UInt, exactType: Option[FType]): UInt = {
    val outType = exactType.getOrElse(maxType)
    // For each format from widest to narrowest, yield (box-valid, value
    // converted to outType).
    def helper(x: UInt, t: FType): Seq[(Bool, UInt)] = {
      val prev =
        if (t == minType) {
          Seq()
        } else {
          val prevT = prevType(t)
          val unswizzled = Cat(
            x(prevT.sig + prevT.exp - 1),
            x(t.sig - 1),
            x(prevT.sig + prevT.exp - 2, 0))
          val prev = helper(unswizzled, prevT)
          val isbox = isBox(x, t)
          prev.map(p => (isbox && p._1, p._2))
        }
      prev :+ (true.B, t.unsafeConvert(x, outType))
    }
    val (oks, floats) = helper(x, maxType).unzip
    if (exactType.isEmpty || floatTypes.size == 1) {
      // Dynamic tag: invalid boxes yield the canonical qNaN.
      Mux(oks(tag), floats(tag), maxType.qNaN)
    } else {
      val t = exactType.get
      floats(typeTag(t)) | Mux(oks(typeTag(t)), 0.U, t.qNaN)
    }
  }
  // make sure that the redundant bits in the NaN-boxed encoding are consistent
  def consistent(x: UInt): Bool = {
    def helper(x: UInt, t: FType): Bool = if (typeTag(t) == 0) true.B else {
      val prevT = prevType(t)
      val unswizzled = Cat(
        x(prevT.sig + prevT.exp - 1),
        x(t.sig - 1),
        x(prevT.sig + prevT.exp - 2, 0))
      val prevOK = !isBox(x, t) || helper(unswizzled, prevT)
      val curOK = !t.isNaN(x) || x(t.sig + t.exp - 4) === x(t.sig - 2, prevT.recodedWidth - 1).andR
      prevOK && curOK
    }
    helper(x, maxType)
  }
  // generate a NaN box from an FU result
  def box(x: UInt, t: FType): UInt = {
    if (t == maxType) {
      x
    } else {
      // Box into the next-wider format, then force the remaining upper bits high.
      val nt = floatTypes(typeTag(t) + 1)
      val bigger = box(((BigInt(1) << nt.recodedWidth)-1).U, nt, x, t)
      bigger | ((BigInt(1) << maxType.recodedWidth) - (BigInt(1) << nt.recodedWidth)).U
    }
  }
  // generate a NaN box from an FU result
  def box(x: UInt, tag: UInt): UInt = {
    val opts = floatTypes.map(t => box(x, t))
    opts(tag)
  }
  // zap bits that hardfloat thinks are don't-cares, but we do care about
  def sanitizeNaN(x: UInt, t: FType): UInt = {
    if (typeTag(t) == 0) {
      x
    } else {
      val maskedNaN = x & ~((BigInt(1) << (t.sig-1)) | (BigInt(1) << (t.sig+t.exp-4))).U(t.recodedWidth.W)
      Mux(t.isNaN(x), maskedNaN, x)
    }
  }
  // implement NaN boxing and recoding for FL*/fmv.*.x
  def recode(x: UInt, tag: UInt): UInt = {
    def helper(x: UInt, t: FType): UInt = {
      if (typeTag(t) == 0) {
        t.recode(x)
      } else {
        val prevT = prevType(t)
        box(t.recode(x), t, helper(x, prevT), prevT)
      }
    }

    // fill MSBs of subword loads to emulate a wider load of a NaN-boxed value
    val boxes = floatTypes.map(t => ((BigInt(1) << maxType.ieeeWidth) - (BigInt(1) << t.ieeeWidth)).U)
    helper(boxes(tag) | x, maxType)
  }
  // implement NaN unboxing and un-recoding for FS*/fmv.x.*
  def ieee(x: UInt, t: FType = maxType): UInt = {
    if (typeTag(t) == 0) {
      t.ieee(x)
    } else {
      val unrecoded = t.ieee(x)
      val prevT = prevType(t)
      // Recover the narrower boxed value and un-recode it recursively.
      val prevRecoded = Cat(
        x(prevT.recodedWidth-2),
        x(t.sig-1),
        x(prevT.recodedWidth-3, 0))
      val prevUnrecoded = ieee(prevRecoded, prevT)
      Cat(unrecoded >> prevT.ieeeWidth, Mux(t.isNaN(x), prevUnrecoded, unrecoded(prevT.ieeeWidth-1, 0)))
    }
  }
}
// Base class for FPU submodules: a clocked Module with core and FPU parameters mixed in.
abstract class FPUModule(implicit val p: Parameters) extends Module with HasCoreParameters with HasFPUParameters
// FP -> integer path: FP stores (raw IEEE bits), fmv/fclass, comparisons
// (feq/flt/fle) and float->int conversions.  One register stage on the input.
class FPToInt(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
  class Output extends Bundle {
    val in = new FPInput            // registered request, echoed to the caller
    val lt = Bool()                 // in1 < in2 (used externally, e.g. for min/max)
    val store = Bits(fLen.W)        // IEEE bits for FP stores
    val toint = Bits(xLen.W)        // integer result
    val exc = Bits(FPConstants.FLAGS_SZ.W)
  }
  val io = IO(new Bundle {
    val in = Flipped(Valid(new FPInput))
    val out = Valid(new Output)
  })

  val in = RegEnable(io.in.bits, io.in.valid)
  val valid = RegNext(io.in.valid)

  // Recoded-format comparator shared by feq/flt/fle and the `lt` output.
  val dcmp = Module(new hardfloat.CompareRecFN(maxExpWidth, maxSigWidth))
  dcmp.io.a := in.in1
  dcmp.io.b := in.in2
  dcmp.io.signaling := !in.rm(1)

  val tag = in.typeTagOut
  // Un-recoded operand replicated to fill the widest width; half-precision
  // results are sign-extended to minXLen first.
  val toint_ieee = (floatTypes.map(t => if (t == FType.H) Fill(maxType.ieeeWidth / minXLen, ieee(in.in1)(15, 0).sextTo(minXLen))
                                        else Fill(maxType.ieeeWidth / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
  val toint = WireDefault(toint_ieee)
  val intType = WireDefault(in.fmt(0))
  io.out.bits.store := (floatTypes.map(t => Fill(fLen / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
  io.out.bits.toint := ((0 until nIntTypes).map(i => toint((minXLen << i) - 1, 0).sextTo(xLen)): Seq[UInt])(intType)
  io.out.bits.exc := 0.U

  // rm(0): substitute the classification mask into the low word.
  when (in.rm(0)) {
    val classify_out = (floatTypes.map(t => t.classify(maxType.unsafeConvert(in.in1, t))): Seq[UInt])(tag)
    toint := classify_out | (toint_ieee >> minXLen << minXLen)
    intType := false.B
  }

  when (in.wflags) { // feq/flt/fle, fcvt
    toint := (~in.rm & Cat(dcmp.io.lt, dcmp.io.eq)).orR | (toint_ieee >> minXLen << minXLen)
    io.out.bits.exc := dcmp.io.exceptionFlags
    intType := false.B

    when (!in.ren2) { // fcvt
      val cvtType = in.typ.extract(log2Ceil(nIntTypes), 1)
      intType := cvtType
      // Full-width converter; narrower widths are checked separately below.
      val conv = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, xLen))
      conv.io.in := in.in1
      conv.io.roundingMode := in.rm
      conv.io.signedOut := ~in.typ(0)
      toint := conv.io.out
      io.out.bits.exc := Cat(conv.io.intExceptionFlags(2, 1).orR, 0.U(3.W), conv.io.intExceptionFlags(0))

      // For narrower integer widths, detect overflow with a width-w converter
      // and substitute the saturated value when the conversion is invalid.
      for (i <- 0 until nIntTypes-1) {
        val w = minXLen << i
        when (cvtType === i.U) {
          val narrow = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, w))
          narrow.io.in := in.in1
          narrow.io.roundingMode := in.rm
          narrow.io.signedOut := ~in.typ(0)

          val excSign = in.in1(maxExpWidth + maxSigWidth) && !maxType.isNaN(in.in1)
          val excOut = Cat(conv.io.signedOut === excSign, Fill(w-1, !excSign))
          val invalid = conv.io.intExceptionFlags(2) || narrow.io.intExceptionFlags(1)
          when (invalid) { toint := Cat(conv.io.out >> w, excOut) }
          io.out.bits.exc := Cat(invalid, 0.U(3.W), !invalid && conv.io.intExceptionFlags(0))
        }
      }
    }
  }

  io.out.valid := valid
  // Comparator lt, corrected for the (-0, +0) / differing-sign cases.
  io.out.bits.lt := dcmp.io.lt || (dcmp.io.a.asSInt < 0.S && dcmp.io.b.asSInt >= 0.S)
  io.out.bits.in := in
}
// Integer -> FP path: fmv (recode raw bits) and int->float conversions, with
// a configurable output pipeline latency.
class IntToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
  val io = IO(new Bundle {
    val in = Flipped(Valid(new IntToFPInput))
    val out = Valid(new FPResult)
  })

  val in = Pipe(io.in)
  val tag = in.bits.typeTagIn

  val mux = Wire(new FPResult)
  mux.exc := 0.U
  // Default path (fmv): NaN-box and recode the raw integer bits.
  mux.data := recode(in.bits.in1, tag)

  // Select and extend the integer operand to xLen according to `typ`
  // (typ(0) distinguishes unsigned/signed extension of sub-xLen widths).
  val intValue = {
    val res = WireDefault(in.bits.in1.asSInt)
    for (i <- 0 until nIntTypes-1) {
      val smallInt = in.bits.in1((minXLen << i) - 1, 0)
      when (in.bits.typ.extract(log2Ceil(nIntTypes), 1) === i.U) {
        res := Mux(in.bits.typ(0), smallInt.zext, smallInt.asSInt)
      }
    }
    res.asUInt
  }

  when (in.bits.wflags) { // fcvt
    // could be improved for RVD/RVQ with a single variable-position rounding
    // unit, rather than N fixed-position ones
    val i2fResults = for (t <- floatTypes) yield {
      val i2f = Module(new hardfloat.INToRecFN(xLen, t.exp, t.sig))
      i2f.io.signedIn := ~in.bits.typ(0)
      i2f.io.in := intValue
      i2f.io.roundingMode := in.bits.rm
      i2f.io.detectTininess := hardfloat.consts.tininess_afterRounding
      (sanitizeNaN(i2f.io.out, t), i2f.io.exceptionFlags)
    }

    val (data, exc) = i2fResults.unzip
    // Pad narrower results up to the widest recoded width using the upper
    // bits of the widest conversion's result.
    val dataPadded = data.init.map(d => Cat(data.last >> d.getWidth, d)) :+ data.last
    mux.data := dataPadded(tag)
    mux.exc := exc(tag)
  }

  io.out <> Pipe(in.valid, mux, latency-1)
}
// FP -> FP path: sign injection (fsgnj/fsgnjn/fsgnjx), fmin/fmax (using the
// externally computed `lt`), and float->float format conversions, with a
// configurable output pipeline latency.
class FPToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
  val io = IO(new Bundle {
    val in = Flipped(Valid(new FPInput))
    val out = Valid(new FPResult)
    val lt = Input(Bool()) // from FPToInt
  })

  val in = Pipe(io.in)

  // Sign-injection variants selected by rm: rm(1) -> xor of signs (fsgnjx),
  // rm(0) -> inverted in2 sign (fsgnjn), else in2's sign (fsgnj).
  val signNum = Mux(in.bits.rm(1), in.bits.in1 ^ in.bits.in2, Mux(in.bits.rm(0), ~in.bits.in2, in.bits.in2))
  val fsgnj = Cat(signNum(fLen), in.bits.in1(fLen-1, 0))

  val fsgnjMux = Wire(new FPResult)
  fsgnjMux.exc := 0.U
  fsgnjMux.data := fsgnj

  when (in.bits.wflags) { // fmin/fmax
    val isnan1 = maxType.isNaN(in.bits.in1)
    val isnan2 = maxType.isNaN(in.bits.in2)
    val isInvalid = maxType.isSNaN(in.bits.in1) || maxType.isSNaN(in.bits.in2)
    val isNaNOut = isnan1 && isnan2   // both NaN -> canonical quiet NaN
    // rm(0) distinguishes min vs. max; NaN operands lose.
    val isLHS = isnan2 || in.bits.rm(0) =/= io.lt && !isnan1
    fsgnjMux.exc := isInvalid << 4    // signaling NaN raises invalid (NV)
    fsgnjMux.data := Mux(isNaNOut, maxType.qNaN, Mux(isLHS, in.bits.in1, in.bits.in2))
  }

  val inTag = in.bits.typeTagIn
  val outTag = in.bits.typeTagOut
  val mux = WireDefault(fsgnjMux)
  // Re-box results that are narrower than the widest format.
  for (t <- floatTypes.init) {
    when (outTag === typeTag(t).U) {
      mux.data := Cat(fsgnjMux.data >> t.recodedWidth, maxType.unsafeConvert(fsgnjMux.data, t))
    }
  }

  when (in.bits.wflags && !in.bits.ren2) { // fcvt
    if (floatTypes.size > 1) {
      // widening conversions simply canonicalize NaN operands
      val widened = Mux(maxType.isNaN(in.bits.in1), maxType.qNaN, in.bits.in1)
      fsgnjMux.data := widened
      fsgnjMux.exc := maxType.isSNaN(in.bits.in1) << 4

      // narrowing conversions require rounding (for RVQ, this could be
      // optimized to use a single variable-position rounding unit, rather
      // than two fixed-position ones)
      for (outType <- floatTypes.init) when (outTag === typeTag(outType).U && ((typeTag(outType) == 0).B || outTag < inTag)) {
        val narrower = Module(new hardfloat.RecFNToRecFN(maxType.exp, maxType.sig, outType.exp, outType.sig))
        narrower.io.in := in.bits.in1
        narrower.io.roundingMode := in.bits.rm
        narrower.io.detectTininess := hardfloat.consts.tininess_afterRounding
        val narrowed = sanitizeNaN(narrower.io.out, outType)
        mux.data := Cat(fsgnjMux.data >> narrowed.getWidth, narrowed)
        mux.exc := narrower.io.exceptionFlags
      }
    }
  }

  io.out <> Pipe(in.valid, mux, latency-1)
}
// Pipelined fused multiply-add on recoded floating-point operands.
// Wraps hardfloat's raw FMA pair (MulAddRecFNToRaw_preMul / _postMul) plus a
// final rounding stage. `latency` selects how many pipeline registers are
// inserted: one after the multiply when latency > 0, and one more before
// rounding when latency == 2. `validin`/`validout` track a request through
// the same Pipe stages as the data.
class MulAddRecFNPipe(latency: Int, expWidth: Int, sigWidth: Int) extends Module
{
override def desiredName = s"MulAddRecFNPipe_l${latency}_e${expWidth}_s${sigWidth}"
require(latency<=2)
val io = IO(new Bundle {
val validin = Input(Bool())
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
val validout = Output(Bool())
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
// Stage 0: operand decomposition and the wide multiply-accumulate.
val mulAddRecFNToRaw_preMul = Module(new hardfloat.MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul = Module(new hardfloat.MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
// Raw significand product plus aligned addend (+& keeps the carry bit).
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
val valid_stage0 = Wire(Bool())
val roundingMode_stage0 = Wire(UInt(3.W))
val detectTininess_stage0 = Wire(UInt(1.W))
// One register between multiply and post-mul normalization when latency > 0.
val postmul_regs = if(latency>0) 1 else 0
mulAddRecFNToRaw_postMul.io.fromPreMul := Pipe(io.validin, mulAddRecFNToRaw_preMul.io.toPostMul, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.mulAddResult := Pipe(io.validin, mulAddResult, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.roundingMode := Pipe(io.validin, io.roundingMode, postmul_regs).bits
// Rounding controls ride along in matching Pipe stages so they stay aligned
// with the data; the Pipe payload of valid_stage0 is unused (only .valid).
roundingMode_stage0 := Pipe(io.validin, io.roundingMode, postmul_regs).bits
detectTininess_stage0 := Pipe(io.validin, io.detectTininess, postmul_regs).bits
valid_stage0 := Pipe(io.validin, false.B, postmul_regs).valid
//------------------------------------------------------------------------
//------------------------------------------------------------------------
// Final stage: round the raw result back into the recoded format.
// Optionally registered when latency == 2.
val roundRawFNToRecFN = Module(new hardfloat.RoundRawFNToRecFN(expWidth, sigWidth, 0))
val round_regs = if(latency==2) 1 else 0
roundRawFNToRecFN.io.invalidExc := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.invalidExc, round_regs).bits
roundRawFNToRecFN.io.in := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.rawOut, round_regs).bits
roundRawFNToRecFN.io.roundingMode := Pipe(valid_stage0, roundingMode_stage0, round_regs).bits
roundRawFNToRecFN.io.detectTininess := Pipe(valid_stage0, detectTininess_stage0, round_regs).bits
io.validout := Pipe(valid_stage0, false.B, round_regs).valid
// FMA never signals the divide-by-zero style infinite exception.
roundRawFNToRecFN.io.infiniteExc := false.B
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
// FMA pipeline for one floating-point format `t`. Registers the request,
// massages the operands so add/sub and pure-multiply reuse the FMA datapath,
// and pads the result with extra Pipe stages so total latency is `latency`.
class FPUFMAPipe(val latency: Int, val t: FType)
(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
override def desiredName = s"FPUFMAPipe_l${latency}_f${t.ieeeWidth}"
require(latency>0)
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
})
// One input register stage; operand fix-ups happen as the request is captured.
val valid = RegNext(io.in.valid)
val in = Reg(new FPInput)
when (io.in.valid) {
// `one`/`zero` are constants in the recoded format: `one` looks like
// recoded 1.0, `zero` a zero carrying the product's sign (in1^in2) —
// NOTE(review): bit positions follow the recoded encoding; confirm against FType.
val one = 1.U << (t.sig + t.exp - 1)
val zero = (io.in.bits.in1 ^ io.in.bits.in2) & (1.U << (t.sig + t.exp))
val cmd_fma = io.in.bits.ren3
val cmd_addsub = io.in.bits.swap23
in := io.in.bits
// add/sub: compute in1*1 + in3; pure multiply: add a signed zero.
when (cmd_addsub) { in.in2 := one }
when (!(cmd_fma || cmd_addsub)) { in.in3 := zero }
}
// Core FMA consumes up to 2 of the latency; the rest is output padding below.
val fma = Module(new MulAddRecFNPipe((latency-1) min 2, t.exp, t.sig))
fma.io.validin := valid
fma.io.op := in.fmaCmd
fma.io.roundingMode := in.rm
fma.io.detectTininess := hardfloat.consts.tininess_afterRounding
fma.io.a := in.in1
fma.io.b := in.in2
fma.io.c := in.in3
val res = Wire(new FPResult)
// Canonicalize any NaN produced by the hardfloat core.
res.data := sanitizeNaN(fma.io.out, t)
res.exc := fma.io.exceptionFlags
// Pad with (latency-3) max 0 stages: 1 (input reg) + min(latency-1,2) + padding = latency.
io.out := Pipe(fma.io.validout, res, (latency-3) max 0)
}
// Rocket's floating-point unit. Decodes FP instructions in ID, reads the FP
// register file in EX, dispatches to the functional pipelines (FMA per format,
// int<->fp conversion, fp<->fp moves/compares, div/sqrt), arbitrates the single
// register-file write port across pipelines of differing latency, and reports
// accrued exception flags. Also services coprocessor (cp_req/cp_resp) requests
// and optionally gates its own clock when idle.
class FPU(cfg: FPUParams)(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new FPUIO)
val (useClockGating, useDebugROB) = coreParams match {
case r: RocketCoreParams =>
val sz = if (r.debugROB.isDefined) r.debugROB.get.size else 1
(r.clockGate, sz < 1)
case _ => (false, false)
}
// Clock gating: clock_en_reg is updated at the bottom of FPUImpl; a new
// coprocessor request can wake the clock combinationally.
val clock_en_reg = Reg(Bool())
val clock_en = clock_en_reg || io.cp_req.valid
val gated_clock =
if (!useClockGating) clock
else ClockGate(clock, clock_en, "fpu_clock_gate")
val fp_decoder = Module(new FPUDecoder)
fp_decoder.io.inst := io.inst
val id_ctrl = WireInit(fp_decoder.io.sigs)
// Vector-unit hook: vector instructions that read/write FP registers patch
// the decoded control signals here.
coreParams match { case r: RocketCoreParams => r.vector.map(v => {
val v_decode = v.decoder(p) // Only need to get ren1
v_decode.io.inst := io.inst
v_decode.io.vconfig := DontCare // core deals with this
when (v_decode.io.legal && v_decode.io.read_frs1) {
id_ctrl.ren1 := true.B
id_ctrl.swap12 := false.B
id_ctrl.toint := true.B
id_ctrl.typeTagIn := I
id_ctrl.typeTagOut := Mux(io.v_sew === 3.U, D, S)
}
when (v_decode.io.write_frd) { id_ctrl.wen := true.B }
})}
// ID -> EX pipeline registers (these live on the ungated clock).
val ex_reg_valid = RegNext(io.valid, false.B)
val ex_reg_inst = RegEnable(io.inst, io.valid)
val ex_reg_ctrl = RegEnable(id_ctrl, io.valid)
val ex_ra = List.fill(3)(Reg(UInt()))
// load/vector response
val load_wb = RegNext(io.ll_resp_val)
val load_wb_typeTag = RegEnable(io.ll_resp_type(1,0) - typeTagWbOffset, io.ll_resp_val)
val load_wb_data = RegEnable(io.ll_resp_data, io.ll_resp_val)
val load_wb_tag = RegEnable(io.ll_resp_tag, io.ll_resp_val)
class FPUImpl { // entering gated-clock domain
val req_valid = ex_reg_valid || io.cp_req.valid
val ex_cp_valid = io.cp_req.fire
val mem_cp_valid = RegNext(ex_cp_valid, false.B)
val wb_cp_valid = RegNext(mem_cp_valid, false.B)
val mem_reg_valid = RegInit(false.B)
val killm = (io.killm || io.nack_mem) && !mem_cp_valid
// Kill X-stage instruction if M-stage is killed. This prevents it from
// speculatively being sent to the div-sqrt unit, which can cause priority
// inversion for two back-to-back divides, the first of which is killed.
val killx = io.killx || mem_reg_valid && killm
mem_reg_valid := ex_reg_valid && !killx || ex_cp_valid
val mem_reg_inst = RegEnable(ex_reg_inst, ex_reg_valid)
val wb_reg_valid = RegNext(mem_reg_valid && (!killm || mem_cp_valid), false.B)
val cp_ctrl = Wire(new FPUCtrlSigs)
cp_ctrl :<>= io.cp_req.bits.viewAsSupertype(new FPUCtrlSigs)
// Default coprocessor response; overridden below when a result is ready.
io.cp_resp.valid := false.B
io.cp_resp.bits.data := 0.U
io.cp_resp.bits.exc := DontCare
// Coprocessor requests borrow the EX slot, so control muxes between them.
val ex_ctrl = Mux(ex_cp_valid, cp_ctrl, ex_reg_ctrl)
val mem_ctrl = RegEnable(ex_ctrl, req_valid)
val wb_ctrl = RegEnable(mem_ctrl, mem_reg_valid)
// CoreMonitorBundle to monitor fp register file writes
val frfWriteBundle = Seq.fill(2)(WireInit(new CoreMonitorBundle(xLen, fLen), DontCare))
frfWriteBundle.foreach { i =>
i.clock := clock
i.reset := reset
i.hartid := io.hartid
i.timer := io.time(31,0)
i.valid := false.B
i.wrenx := false.B
i.wrenf := false.B
i.excpt := false.B
}
// regfile
// 32 entries, each fLen+1 bits wide (values are stored recoded).
val regfile = Mem(32, Bits((fLen+1).W))
// Long-latency (load/vector) writeback port into the register file.
when (load_wb) {
val wdata = recode(load_wb_data, load_wb_typeTag)
regfile(load_wb_tag) := wdata
assert(consistent(wdata))
if (enableCommitLog)
printf("f%d p%d 0x%x\n", load_wb_tag, load_wb_tag + 32.U, ieee(wdata))
if (useDebugROB)
DebugROB.pushWb(clock, reset, io.hartid, load_wb, load_wb_tag + 32.U, ieee(wdata))
frfWriteBundle(0).wrdst := load_wb_tag
frfWriteBundle(0).wrenf := true.B
frfWriteBundle(0).wrdata := ieee(wdata)
}
val ex_rs = ex_ra.map(a => regfile(a))
// Register-read address selection; swap12/swap23 reroute source fields so
// downstream units always see operands in canonical order.
when (io.valid) {
when (id_ctrl.ren1) {
when (!id_ctrl.swap12) { ex_ra(0) := io.inst(19,15) }
when (id_ctrl.swap12) { ex_ra(1) := io.inst(19,15) }
}
when (id_ctrl.ren2) {
when (id_ctrl.swap12) { ex_ra(0) := io.inst(24,20) }
when (id_ctrl.swap23) { ex_ra(2) := io.inst(24,20) }
when (!id_ctrl.swap12 && !id_ctrl.swap23) { ex_ra(1) := io.inst(24,20) }
}
when (id_ctrl.ren3) { ex_ra(2) := io.inst(31,27) }
}
// rm=7 selects the dynamic rounding mode from fcsr.
val ex_rm = Mux(ex_reg_inst(14,12) === 7.U, io.fcsr_rm, ex_reg_inst(14,12))
// Builds the common request bundle for a functional unit; minT bounds the
// narrowest format the unit accepts when unboxing operands.
def fuInput(minT: Option[FType]): FPInput = {
val req = Wire(new FPInput)
val tag = ex_ctrl.typeTagIn
req.viewAsSupertype(new Bundle with HasFPUCtrlSigs) :#= ex_ctrl.viewAsSupertype(new Bundle with HasFPUCtrlSigs)
req.rm := ex_rm
req.in1 := unbox(ex_rs(0), tag, minT)
req.in2 := unbox(ex_rs(1), tag, minT)
req.in3 := unbox(ex_rs(2), tag, minT)
req.typ := ex_reg_inst(21,20)
req.fmt := ex_reg_inst(26,25)
req.fmaCmd := ex_reg_inst(3,2) | (!ex_ctrl.ren3 && ex_reg_inst(27))
// Coprocessor requests supply operands directly (with optional swaps).
when (ex_cp_valid) {
req := io.cp_req.bits
when (io.cp_req.bits.swap12) {
req.in1 := io.cp_req.bits.in2
req.in2 := io.cp_req.bits.in1
}
when (io.cp_req.bits.swap23) {
req.in2 := io.cp_req.bits.in3
req.in3 := io.cp_req.bits.in2
}
}
req
}
// Functional units: single-precision FMA, fp->int, int->fp, fp->fp.
val sfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.S))
sfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === S
sfma.io.in.bits := fuInput(Some(sfma.t))
val fpiu = Module(new FPToInt)
fpiu.io.in.valid := req_valid && (ex_ctrl.toint || ex_ctrl.div || ex_ctrl.sqrt || (ex_ctrl.fastpipe && ex_ctrl.wflags))
fpiu.io.in.bits := fuInput(None)
io.store_data := fpiu.io.out.bits.store
io.toint_data := fpiu.io.out.bits.toint
// toint coprocessor results respond from the MEM stage.
when(fpiu.io.out.valid && mem_cp_valid && mem_ctrl.toint){
io.cp_resp.bits.data := fpiu.io.out.bits.toint
io.cp_resp.valid := true.B
}
val ifpu = Module(new IntToFP(cfg.ifpuLatency))
ifpu.io.in.valid := req_valid && ex_ctrl.fromint
ifpu.io.in.bits := fpiu.io.in.bits
ifpu.io.in.bits.in1 := Mux(ex_cp_valid, io.cp_req.bits.in1, io.fromint_data)
val fpmu = Module(new FPToFP(cfg.fpmuLatency))
fpmu.io.in.valid := req_valid && ex_ctrl.fastpipe
fpmu.io.in.bits := fpiu.io.in.bits
fpmu.io.lt := fpiu.io.out.bits.lt
// Div/sqrt bookkeeping; the wires are driven from the cfg.divSqrt block below.
val divSqrt_wen = WireDefault(false.B)
val divSqrt_inFlight = WireDefault(false.B)
val divSqrt_waddr = Reg(UInt(5.W))
val divSqrt_cp = Reg(Bool())
val divSqrt_typeTag = Wire(UInt(log2Up(floatTypes.size).W))
val divSqrt_wdata = Wire(UInt((fLen+1).W))
val divSqrt_flags = Wire(UInt(FPConstants.FLAGS_SZ.W))
divSqrt_typeTag := DontCare
divSqrt_wdata := DontCare
divSqrt_flags := DontCare
// writeback arbitration
// One Pipe entry per functional pipeline; D/H FMA pipes exist only when the
// configuration supports those formats.
case class Pipe(p: Module, lat: Int, cond: (FPUCtrlSigs) => Bool, res: FPResult)
val pipes = List(
Pipe(fpmu, fpmu.latency, (c: FPUCtrlSigs) => c.fastpipe, fpmu.io.out.bits),
Pipe(ifpu, ifpu.latency, (c: FPUCtrlSigs) => c.fromint, ifpu.io.out.bits),
Pipe(sfma, sfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === S, sfma.io.out.bits)) ++
(fLen > 32).option({
val dfma = Module(new FPUFMAPipe(cfg.dfmaLatency, FType.D))
dfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === D
dfma.io.in.bits := fuInput(Some(dfma.t))
Pipe(dfma, dfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === D, dfma.io.out.bits)
}) ++
(minFLen == 16).option({
val hfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.H))
hfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === H
hfma.io.in.bits := fuInput(Some(hfma.t))
Pipe(hfma, hfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === H, hfma.io.out.bits)
})
// One-hot mask of the cycle (relative to `offset`) when instruction `c`
// will want the write port.
def latencyMask(c: FPUCtrlSigs, offset: Int) = {
require(pipes.forall(_.lat >= offset))
pipes.map(p => Mux(p.cond(c), (1 << p.lat-offset).U, 0.U)).reduce(_|_)
}
def pipeid(c: FPUCtrlSigs) = pipes.zipWithIndex.map(p => Mux(p._1.cond(c), p._2.U, 0.U)).reduce(_|_)
val maxLatency = pipes.map(_.lat).max
val memLatencyMask = latencyMask(mem_ctrl, 2)
class WBInfo extends Bundle {
val rd = UInt(5.W)
val typeTag = UInt(log2Up(floatTypes.size).W)
val cp = Bool()
val pipeid = UInt(log2Ceil(pipes.size).W)
}
// wen: shift register of pending writebacks (bit 0 = writing this cycle);
// wbInfo: per-slot metadata that shifts down alongside it.
val wen = RegInit(0.U((maxLatency-1).W))
val wbInfo = Reg(Vec(maxLatency-1, new WBInfo))
val mem_wen = mem_reg_valid && (mem_ctrl.fma || mem_ctrl.fastpipe || mem_ctrl.fromint)
val write_port_busy = RegEnable(mem_wen && (memLatencyMask & latencyMask(ex_ctrl, 1)).orR || (wen & latencyMask(ex_ctrl, 0)).orR, req_valid)
ccover(mem_reg_valid && write_port_busy, "WB_STRUCTURAL", "structural hazard on writeback")
for (i <- 0 until maxLatency-2) {
when (wen(i+1)) { wbInfo(i) := wbInfo(i+1) }
}
wen := wen >> 1
when (mem_wen) {
when (!killm) {
wen := wen >> 1 | memLatencyMask
}
for (i <- 0 until maxLatency-1) {
when (!write_port_busy && memLatencyMask(i)) {
wbInfo(i).cp := mem_cp_valid
wbInfo(i).typeTag := mem_ctrl.typeTagOut
wbInfo(i).pipeid := pipeid(mem_ctrl)
wbInfo(i).rd := mem_reg_inst(11,7)
}
}
}
// Writeback source select: div/sqrt results take priority over pipe slot 0.
val waddr = Mux(divSqrt_wen, divSqrt_waddr, wbInfo(0).rd)
val wb_cp = Mux(divSqrt_wen, divSqrt_cp, wbInfo(0).cp)
val wtypeTag = Mux(divSqrt_wen, divSqrt_typeTag, wbInfo(0).typeTag)
val wdata = box(Mux(divSqrt_wen, divSqrt_wdata, (pipes.map(_.res.data): Seq[UInt])(wbInfo(0).pipeid)), wtypeTag)
val wexc = (pipes.map(_.res.exc): Seq[UInt])(wbInfo(0).pipeid)
when ((!wbInfo(0).cp && wen(0)) || divSqrt_wen) {
assert(consistent(wdata))
regfile(waddr) := wdata
if (enableCommitLog) {
printf("f%d p%d 0x%x\n", waddr, waddr + 32.U, ieee(wdata))
}
frfWriteBundle(1).wrdst := waddr
frfWriteBundle(1).wrenf := true.B
frfWriteBundle(1).wrdata := ieee(wdata)
}
if (useDebugROB) {
DebugROB.pushWb(clock, reset, io.hartid, (!wbInfo(0).cp && wen(0)) || divSqrt_wen, waddr + 32.U, ieee(wdata))
}
// Coprocessor results are returned instead of written to the register file.
when (wb_cp && (wen(0) || divSqrt_wen)) {
io.cp_resp.bits.data := wdata
io.cp_resp.valid := true.B
}
assert(!io.cp_req.valid || pipes.forall(_.lat == pipes.head.lat).B,
s"FPU only supports coprocessor if FMA pipes have uniform latency ${pipes.map(_.lat)}")
// Avoid structural hazards and nacking of external requests
// toint responds in the MEM stage, so an incoming toint can induce a structural hazard against inflight FMAs
io.cp_req.ready := !ex_reg_valid && !(cp_ctrl.toint && wen =/= 0.U) && !divSqrt_inFlight
// Accrued exception flags from toint (WB stage), div/sqrt, and pipe writeback.
val wb_toint_valid = wb_reg_valid && wb_ctrl.toint
val wb_toint_exc = RegEnable(fpiu.io.out.bits.exc, mem_ctrl.toint)
io.fcsr_flags.valid := wb_toint_valid || divSqrt_wen || wen(0)
io.fcsr_flags.bits :=
Mux(wb_toint_valid, wb_toint_exc, 0.U) |
Mux(divSqrt_wen, divSqrt_flags, 0.U) |
Mux(wen(0), wexc, 0.U)
val divSqrt_write_port_busy = (mem_ctrl.div || mem_ctrl.sqrt) && wen.orR
// fcsr is only readable once nothing in flight can still update the flags.
io.fcsr_rdy := !(ex_reg_valid && ex_ctrl.wflags || mem_reg_valid && mem_ctrl.wflags || wb_reg_valid && wb_ctrl.toint || wen.orR || divSqrt_inFlight)
io.nack_mem := (write_port_busy || divSqrt_write_port_busy || divSqrt_inFlight) && !mem_cp_valid
io.dec <> id_ctrl
// Scoreboard interface: long-latency (lat > 3) pipes and div/sqrt use the
// core's FP scoreboard instead of fixed-latency writeback tracking.
def useScoreboard(f: ((Pipe, Int)) => Bool) = pipes.zipWithIndex.filter(_._1.lat > 3).map(x => f(x)).fold(false.B)(_||_)
io.sboard_set := wb_reg_valid && !wb_cp_valid && RegNext(useScoreboard(_._1.cond(mem_ctrl)) || mem_ctrl.div || mem_ctrl.sqrt || mem_ctrl.vec)
io.sboard_clr := !wb_cp_valid && (divSqrt_wen || (wen(0) && useScoreboard(x => wbInfo(0).pipeid === x._2.U)))
io.sboard_clra := waddr
ccover(io.sboard_clr && load_wb, "DUAL_WRITEBACK", "load and FMA writeback on same cycle")
// we don't currently support round-max-magnitude (rm=4)
io.illegal_rm := io.inst(14,12).isOneOf(5.U, 6.U) || io.inst(14,12) === 7.U && io.fcsr_rm >= 5.U
if (cfg.divSqrt) {
// One DivSqrtRecFN_small instance per supported format; at most one
// operation is in flight across all of them.
val divSqrt_inValid = mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt) && !divSqrt_inFlight
val divSqrt_killed = RegNext(divSqrt_inValid && killm, true.B)
when (divSqrt_inValid) {
divSqrt_waddr := mem_reg_inst(11,7)
divSqrt_cp := mem_cp_valid
}
ccover(divSqrt_inFlight && divSqrt_killed, "DIV_KILLED", "divide killed after issued to divider")
ccover(divSqrt_inFlight && mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt), "DIV_BUSY", "divider structural hazard")
ccover(mem_reg_valid && divSqrt_write_port_busy, "DIV_WB_STRUCTURAL", "structural hazard on division writeback")
for (t <- floatTypes) {
val tag = mem_ctrl.typeTagOut
// A killed op resets its divider via withReset so no stale result emerges.
val divSqrt = withReset(divSqrt_killed) { Module(new hardfloat.DivSqrtRecFN_small(t.exp, t.sig, 0)) }
divSqrt.io.inValid := divSqrt_inValid && tag === typeTag(t).U
divSqrt.io.sqrtOp := mem_ctrl.sqrt
divSqrt.io.a := maxType.unsafeConvert(fpiu.io.out.bits.in.in1, t)
divSqrt.io.b := maxType.unsafeConvert(fpiu.io.out.bits.in.in2, t)
divSqrt.io.roundingMode := fpiu.io.out.bits.in.rm
divSqrt.io.detectTininess := hardfloat.consts.tininess_afterRounding
when (!divSqrt.io.inReady) { divSqrt_inFlight := true.B } // only 1 in flight
when (divSqrt.io.outValid_div || divSqrt.io.outValid_sqrt) {
divSqrt_wen := !divSqrt_killed
divSqrt_wdata := sanitizeNaN(divSqrt.io.out, t)
divSqrt_flags := divSqrt.io.exceptionFlags
divSqrt_typeTag := typeTag(t).U
}
}
when (divSqrt_killed) { divSqrt_inFlight := false.B }
} else {
when (id_ctrl.div || id_ctrl.sqrt) { io.illegal_rm := true.B }
}
// gate the clock
clock_en_reg := !useClockGating.B ||
io.keep_clock_enabled || // chicken bit
io.valid || // ID stage
req_valid || // EX stage
mem_reg_valid || mem_cp_valid || // MEM stage
wb_reg_valid || wb_cp_valid || // WB stage
wen.orR || divSqrt_inFlight || // post-WB stage
io.ll_resp_val // load writeback
} // leaving gated-clock domain
val fpuImpl = withClock (gated_clock) { new FPUImpl }
// Conditional-coverage helper; labels are prefixed with FPU_.
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"FPU_$label", "Core;;" + desc)
} | module IntToFP(
// Generated int->float conversion unit: one input register stage
// (in_pipe_*) and one output register stage (io_out_pipe_*). Converting
// ops (wflags set) go through two INToRecFN instances (32->float and
// 64->double); otherwise the input is treated as raw IEEE bits and
// recoded into the 65-bit recoded format (the long mux expressions below).
input clock,
input reset,
input io_in_valid,
input [1:0] io_in_bits_typeTagIn,
input io_in_bits_wflags,
input [2:0] io_in_bits_rm,
input [1:0] io_in_bits_typ,
input [63:0] io_in_bits_in1,
output io_out_valid,
output [64:0] io_out_bits_data,
output [4:0] io_out_bits_exc
);
wire [64:0] _i2fResults_i2f_1_io_out;
wire [4:0] _i2fResults_i2f_1_io_exceptionFlags;
wire [32:0] _i2fResults_i2f_io_out;
wire [4:0] _i2fResults_i2f_io_exceptionFlags;
// Input pipeline registers (stage 1 of 2).
reg in_pipe_v;
reg [1:0] in_pipe_b_typeTagIn;
reg in_pipe_b_wflags;
reg [2:0] in_pipe_b_rm;
reg [1:0] in_pipe_b_typ;
reg [63:0] in_pipe_b_in1;
// typ[1] selects 64-bit input; otherwise sign/zero-extend the low 32 bits
// (typ[0] distinguishes unsigned from signed).
wire [63:0] intValue_res = in_pipe_b_typ[1] ? in_pipe_b_in1 : {{32{~(in_pipe_b_typ[0]) & in_pipe_b_in1[31]}}, in_pipe_b_in1[31:0]};
// Output pipeline registers (stage 2 of 2).
reg io_out_pipe_v;
reg [64:0] io_out_pipe_b_data;
reg [4:0] io_out_pipe_b_exc;
// Recode path: NaN-box narrow inputs, then IEEE->recoded for double...
wire [63:0] _mux_data_T_2 = (in_pipe_b_typeTagIn[0] ? 64'h0 : 64'hFFFFFFFF00000000) | in_pipe_b_in1;
wire mux_data_rawIn_isZeroExpIn = _mux_data_T_2[62:52] == 11'h0;
// Priority encoder: leading-zero count of the subnormal double significand.
wire [5:0] mux_data_rawIn_normDist = _mux_data_T_2[51] ? 6'h0 : _mux_data_T_2[50] ? 6'h1 : _mux_data_T_2[49] ? 6'h2 : _mux_data_T_2[48] ? 6'h3 : _mux_data_T_2[47] ? 6'h4 : _mux_data_T_2[46] ? 6'h5 : _mux_data_T_2[45] ? 6'h6 : _mux_data_T_2[44] ? 6'h7 : _mux_data_T_2[43] ? 6'h8 : _mux_data_T_2[42] ? 6'h9 : _mux_data_T_2[41] ? 6'hA : _mux_data_T_2[40] ? 6'hB : _mux_data_T_2[39] ? 6'hC : _mux_data_T_2[38] ? 6'hD : _mux_data_T_2[37] ? 6'hE : _mux_data_T_2[36] ? 6'hF : _mux_data_T_2[35] ? 6'h10 : _mux_data_T_2[34] ? 6'h11 : _mux_data_T_2[33] ? 6'h12 : _mux_data_T_2[32] ? 6'h13 : _mux_data_T_2[31] ? 6'h14 : _mux_data_T_2[30] ? 6'h15 : _mux_data_T_2[29] ? 6'h16 : _mux_data_T_2[28] ? 6'h17 : _mux_data_T_2[27] ? 6'h18 : _mux_data_T_2[26] ? 6'h19 : _mux_data_T_2[25] ? 6'h1A : _mux_data_T_2[24] ? 6'h1B : _mux_data_T_2[23] ? 6'h1C : _mux_data_T_2[22] ? 6'h1D : _mux_data_T_2[21] ? 6'h1E : _mux_data_T_2[20] ? 6'h1F : _mux_data_T_2[19] ? 6'h20 : _mux_data_T_2[18] ? 6'h21 : _mux_data_T_2[17] ? 6'h22 : _mux_data_T_2[16] ? 6'h23 : _mux_data_T_2[15] ? 6'h24 : _mux_data_T_2[14] ? 6'h25 : _mux_data_T_2[13] ? 6'h26 : _mux_data_T_2[12] ? 6'h27 : _mux_data_T_2[11] ? 6'h28 : _mux_data_T_2[10] ? 6'h29 : _mux_data_T_2[9] ? 6'h2A : _mux_data_T_2[8] ? 6'h2B : _mux_data_T_2[7] ? 6'h2C : _mux_data_T_2[6] ? 6'h2D : _mux_data_T_2[5] ? 6'h2E : _mux_data_T_2[4] ? 6'h2F : _mux_data_T_2[3] ? 6'h30 : _mux_data_T_2[2] ? 6'h31 : {5'h19, ~(_mux_data_T_2[1])};
wire [11:0] _mux_data_rawIn_adjustedExp_T_4 = (mux_data_rawIn_isZeroExpIn ? {6'h3F, ~mux_data_rawIn_normDist} : {1'h0, _mux_data_T_2[62:52]}) + {10'h100, mux_data_rawIn_isZeroExpIn ? 2'h2 : 2'h1};
wire [114:0] _mux_data_rawIn_subnormFract_T = {63'h0, _mux_data_T_2[51:0]} << mux_data_rawIn_normDist;
wire [51:0] _mux_data_rawIn_out_sig_T_2 = mux_data_rawIn_isZeroExpIn ? {_mux_data_rawIn_subnormFract_T[50:0], 1'h0} : _mux_data_T_2[51:0];
wire [2:0] _mux_data_T_4 = mux_data_rawIn_isZeroExpIn & ~(|(_mux_data_T_2[51:0])) ? 3'h0 : _mux_data_rawIn_adjustedExp_T_4[11:9];
wire _GEN = _mux_data_T_4[0] | (&(_mux_data_rawIn_adjustedExp_T_4[11:10])) & (|(_mux_data_T_2[51:0]));
// ...and the same IEEE->recoded transformation for single precision.
wire mux_data_rawIn_isZeroExpIn_1 = _mux_data_T_2[30:23] == 8'h0;
wire [4:0] mux_data_rawIn_normDist_1 = _mux_data_T_2[22] ? 5'h0 : _mux_data_T_2[21] ? 5'h1 : _mux_data_T_2[20] ? 5'h2 : _mux_data_T_2[19] ? 5'h3 : _mux_data_T_2[18] ? 5'h4 : _mux_data_T_2[17] ? 5'h5 : _mux_data_T_2[16] ? 5'h6 : _mux_data_T_2[15] ? 5'h7 : _mux_data_T_2[14] ? 5'h8 : _mux_data_T_2[13] ? 5'h9 : _mux_data_T_2[12] ? 5'hA : _mux_data_T_2[11] ? 5'hB : _mux_data_T_2[10] ? 5'hC : _mux_data_T_2[9] ? 5'hD : _mux_data_T_2[8] ? 5'hE : _mux_data_T_2[7] ? 5'hF : _mux_data_T_2[6] ? 5'h10 : _mux_data_T_2[5] ? 5'h11 : _mux_data_T_2[4] ? 5'h12 : _mux_data_T_2[3] ? 5'h13 : _mux_data_T_2[2] ? 5'h14 : _mux_data_T_2[1] ? 5'h15 : 5'h16;
wire [8:0] _mux_data_rawIn_adjustedExp_T_9 = (mux_data_rawIn_isZeroExpIn_1 ? {4'hF, ~mux_data_rawIn_normDist_1} : {1'h0, _mux_data_T_2[30:23]}) + {7'h20, mux_data_rawIn_isZeroExpIn_1 ? 2'h2 : 2'h1};
wire [2:0] _mux_data_T_13 = mux_data_rawIn_isZeroExpIn_1 & ~(|(_mux_data_T_2[22:0])) ? 3'h0 : _mux_data_rawIn_adjustedExp_T_9[8:6];
// NaN sanitization of the double-precision conversion result.
wire [64:0] i2fResults_1_1 = ({65{_i2fResults_i2f_1_io_out[63:61] != 3'h7}} | 65'h1EFEFFFFFFFFFFFFF) & _i2fResults_i2f_1_io_out;
wire [53:0] _mux_data_rawIn_subnormFract_T_2 = {31'h0, _mux_data_T_2[22:0]} << mux_data_rawIn_normDist_1;
always @(posedge clock) begin
if (reset) begin
in_pipe_v <= 1'h0;
io_out_pipe_v <= 1'h0;
end
else begin
in_pipe_v <= io_in_valid;
io_out_pipe_v <= in_pipe_v;
end
if (io_in_valid) begin
in_pipe_b_typeTagIn <= io_in_bits_typeTagIn;
in_pipe_b_wflags <= io_in_bits_wflags;
in_pipe_b_rm <= io_in_bits_rm;
in_pipe_b_typ <= io_in_bits_typ;
in_pipe_b_in1 <= io_in_bits_in1;
end
if (in_pipe_v) begin
// wflags: take the conversion result (typeTagIn[0] picks double vs
// single); otherwise take the recoded raw-bits path.
io_out_pipe_b_data <= in_pipe_b_wflags ? (in_pipe_b_typeTagIn[0] ? i2fResults_1_1 : {i2fResults_1_1[64:33], _i2fResults_i2f_io_out}) : {_mux_data_T_2[63], _mux_data_T_4[2:1], _GEN, (&{_mux_data_T_4[2:1], _GEN}) ? {&(_mux_data_rawIn_out_sig_T_2[51:32]), _mux_data_rawIn_adjustedExp_T_4[7:1], _mux_data_T_13[2], _mux_data_rawIn_out_sig_T_2[51:32], _mux_data_T_2[31], _mux_data_T_13[1], _mux_data_T_13[0] | (&(_mux_data_rawIn_adjustedExp_T_9[8:7])) & (|(_mux_data_T_2[22:0])), _mux_data_rawIn_adjustedExp_T_9[5:0], mux_data_rawIn_isZeroExpIn_1 ? {_mux_data_rawIn_subnormFract_T_2[21:0], 1'h0} : _mux_data_T_2[22:0]} : {_mux_data_rawIn_adjustedExp_T_4[8:0], _mux_data_rawIn_out_sig_T_2}};
io_out_pipe_b_exc <= in_pipe_b_wflags ? (in_pipe_b_typeTagIn[0] ? _i2fResults_i2f_1_io_exceptionFlags : _i2fResults_i2f_io_exceptionFlags) : 5'h0;
end
end
// int -> recoded single-precision converter.
INToRecFN_i64_e8_s24 i2fResults_i2f (
.io_signedIn (~(in_pipe_b_typ[0])),
.io_in (intValue_res),
.io_roundingMode (in_pipe_b_rm),
.io_out (_i2fResults_i2f_io_out),
.io_exceptionFlags (_i2fResults_i2f_io_exceptionFlags)
);
// int -> recoded double-precision converter.
INToRecFN_i64_e11_s53 i2fResults_i2f_1 (
.io_signedIn (~(in_pipe_b_typ[0])),
.io_in (intValue_res),
.io_roundingMode (in_pipe_b_rm),
.io_out (_i2fResults_i2f_1_io_out),
.io_exceptionFlags (_i2fResults_i2f_1_io_exceptionFlags)
);
assign io_out_valid = io_out_pipe_v;
assign io_out_bits_data = io_out_pipe_b_data;
assign io_out_bits_exc = io_out_pipe_b_exc;
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
object DescribedSRAM {
/** Construct a [[SyncReadMem]] and record SRAM annotation metadata for it.
*
* @param name instance name suggested for the memory
* @param desc human-readable description stored in the annotation
* @param size memory depth (number of entries)
* @param data element type; for a Vec the write mask is per element
* @return the annotated SyncReadMem
*/
def apply[T <: Data](
name: String,
desc: String,
size: BigInt, // depth
data: T
): SyncReadMem[T] = {
val mem = SyncReadMem(size, data)
mem.suggestName(name)
// Write-mask granularity: one Vec element if data is a Vec, else the
// whole word.
val granWidth = data match {
case v: Vec[_] => v.head.getWidth
case d => d.getWidth
}
Annotated.srams(
component = mem,
name = name,
address_width = log2Ceil(size),
data_width = data.getWidth,
depth = size,
description = desc,
write_mask_granularity = granWidth
)
mem
}
} | module rockettile_dcache_tag_array(
// Thin wrapper around the externally-provided SRAM macro (_ext): 64 entries
// of 176 bits behind a single read/write port with an 8-bit write mask.
input [5:0] RW0_addr,
input RW0_en,
input RW0_clk,
input RW0_wmode,
input [175:0] RW0_wdata,
output [175:0] RW0_rdata,
input [7:0] RW0_wmask
);
// All ports pass straight through to the macro instance.
rockettile_dcache_tag_array_ext rockettile_dcache_tag_array_ext (
.RW0_addr (RW0_addr),
.RW0_en (RW0_en),
.RW0_clk (RW0_clk),
.RW0_wmode (RW0_wmode),
.RW0_wdata (RW0_wdata),
.RW0_rdata (RW0_rdata),
.RW0_wmask (RW0_wmask)
);
endmodule |
Generate the Verilog code corresponding to this Chisel code /*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// Rounds a raw (unrounded, possibly unnormalized) floating-point value in
// (inExpWidth, inSigWidth) format into the recoded format with outExpWidth
// exponent and outSigWidth significand bits, honoring the IEEE rounding mode
// and tininess-detection mode. `options` flags can promise that overflow/
// underflow cannot occur or that the significand MSB is always zero, which
// prunes the corresponding hardware.
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
// Decode the option flags and derive key exponent constants of the output
// recoded format (NaN/infinity codes, max finite, min normal/nonzero).
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
// True when rounding away from zero for this operand's sign.
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
// Rebias the exponent from the input format to the output format, and
// align the significand to outSigWidth+2 bits (LSB acts as sticky when
// narrowing).
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
// Fast path: result is always exact, so no rounding logic is needed.
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
// General path. roundMask marks the bits to be rounded off (widening
// toward the subnormal range when underflow is possible); roundPosBit
// is the round bit and anyRoundExtra the sticky bits below it.
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
// Increment and clear rounded-off bits; the extra mask term implements
// round-to-even ties, and the Mux's else-arm implements round-to-odd.
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
// Significand carry-out propagates into the exponent.
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
// "Unbounded-range" round/sticky bits model rounding as if the exponent
// range were unlimited, used for after-rounding tininess detection.
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_roundIncr == unboundedRange_roundIncr && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
// Special-case resolution and final packing: NaN/infinity codes, pegging
// to min-nonzero or max-finite magnitudes, and flag generation.
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
// Exponent assembled by masking off bits for each special case, then
// OR-ing in the appropriate special-case code.
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
/** Rounds a raw (unpacked) floating-point value into the recoded (recFN)
  * format of the same expWidth/sigWidth.  The input significand carries two
  * extra (round/sticky) bits; all rounding work is delegated to a
  * RoundAnyRawFNToRecFN instance with identical input and output widths.
  */
class
    RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
    extends RawModule
{
    override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
    val io = IO(new Bundle {
        val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
        val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
        val in = Input(new RawFloat(expWidth, sigWidth + 2))
        val roundingMode = Input(UInt(3.W))
        val detectTininess = Input(UInt(1.W))
        val out = Output(Bits((expWidth + sigWidth + 1).W))
        val exceptionFlags = Output(Bits(5.W))
    })
    // Same-width rounder does the real work; this module is just plumbing.
    val roundAnyRawFNToRecFN =
        Module(
            new RoundAnyRawFNToRecFN(
                    expWidth, sigWidth + 2, expWidth, sigWidth, options))
    roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
    roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
    roundAnyRawFNToRecFN.io.in := io.in
    roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
    roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
    io.out := roundAnyRawFNToRecFN.io.out
    io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
} | module RoundAnyRawFNToRecFN_ie7_is64_oe11_os53(
  // Specialized rounder: raw value (7-bit exp, 64-bit sig + round/sticky) to
  // recoded double-like output (11-bit exp, 53-bit sig).  Exception paths
  // other than 'inexact' were optimized away in this instance.
  input io_in_isZero,
  input io_in_sign,
  input [8:0] io_in_sExp,
  input [64:0] io_in_sig,
  input [2:0] io_roundingMode,
  output [64:0] io_out,
  output [4:0] io_exceptionFlags
);
  // roundingMode encodings used below: 0 = near_even, 2 = min, 3 = max,
  // 4 = near_maxMag, 6 = odd.
  wire roundingMode_near_even = io_roundingMode == 3'h0;
  // _GEN = {round bit, sticky (OR of discarded low bits)}
  wire [1:0] _GEN = {io_in_sig[10], |(io_in_sig[9:0])};
  wire [54:0] roundedSig = (roundingMode_near_even | io_roundingMode == 3'h4) & io_in_sig[10] | (io_roundingMode == 3'h2 & io_in_sign | io_roundingMode == 3'h3 & ~io_in_sign) & (|_GEN) ? {1'h0, io_in_sig[64:11]} + 55'h1 & {54'h3FFFFFFFFFFFFF, ~(roundingMode_near_even & io_in_sig[10] & ~(|(io_in_sig[9:0])))} : {1'h0, io_in_sig[64:12], io_in_sig[11] | io_roundingMode == 3'h6 & (|_GEN)};
  // Exponent bias adjustment (+ 12'h780) is folded into the packed output;
  // a zero input forces the exponent field clear via the 12'hE00 mask.
  assign io_out = {io_in_sign, {{3{io_in_sExp[8]}}, io_in_sExp} + {10'h0, roundedSig[54:53]} + 12'h780 & ~(io_in_isZero ? 12'hE00 : 12'h0), io_in_isZero ? 52'h0 : roundedSig[51:0]};
  // Only the inexact flag (bit 0) can be raised here.
  assign io_exceptionFlags = {4'h0, ~io_in_isZero & (|_GEN)};
endmodule |
Generate the Verilog code corresponding to this Chisel code package boom.v3.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import boom.v3.common._
import boom.v3.util.{BoomCoreStringPrefix, MaskLower, WrapInc}
import scala.math.min
/** Per-lane response from one TAGE table: a 3-bit prediction counter and a
  * 2-bit usefulness counter. */
class TageResp extends Bundle {
  val ctr = UInt(3.W) // saturating prediction counter; MSB is the direction
  val u = UInt(2.W) // usefulness; guards the entry against reallocation
}
/** A single tagged-geometric (TAGE) predictor table.
  *
  * @param nRows      number of rows (sets) in the table
  * @param tagSz      width of the partial tag stored in each entry
  * @param histLength amount of global history folded into the index/tag hash
  * @param uBitPeriod period, in cycles, of the usefulness-bit clearing sweep
  */
class TageTable(val nRows: Int, val tagSz: Int, val histLength: Int, val uBitPeriod: Int)
  (implicit p: Parameters) extends BoomModule()(p)
  with HasBoomFrontendParameters
{
  require(histLength <= globalHistoryLength)

  val nWrBypassEntries = 2
  val io = IO( new Bundle {
    // F1: prediction read request
    val f1_req_valid = Input(Bool())
    val f1_req_pc = Input(UInt(vaddrBitsExtended.W))
    val f1_req_ghist = Input(UInt(globalHistoryLength.W))

    // F3: per-lane response; valid indicates a tag hit in this table
    val f3_resp = Output(Vec(bankWidth, Valid(new TageResp)))

    // Update interface, one lane per bank slot
    val update_mask = Input(Vec(bankWidth, Bool()))
    val update_taken = Input(Vec(bankWidth, Bool()))
    val update_alloc = Input(Vec(bankWidth, Bool()))
    val update_old_ctr = Input(Vec(bankWidth, UInt(3.W)))

    val update_pc = Input(UInt())
    val update_hist = Input(UInt())

    val update_u_mask = Input(Vec(bankWidth, Bool()))
    val update_u = Input(Vec(bankWidth, UInt(2.W)))
  })

  // XOR-fold 'hist' (histLength bits) down to l bits.
  def compute_folded_hist(hist: UInt, l: Int) = {
    val nChunks = (histLength + l - 1) / l
    val hist_chunks = (0 until nChunks) map {i =>
      hist(min((i+1)*l, histLength)-1, i*l)
    }
    hist_chunks.reduce(_^_)
  }

  // Hash the fetch index with folded history to form row index and tag.
  def compute_tag_and_hash(unhashed_idx: UInt, hist: UInt) = {
    val idx_history = compute_folded_hist(hist, log2Ceil(nRows))
    val idx = (unhashed_idx ^ idx_history)(log2Ceil(nRows)-1,0)
    val tag_history = compute_folded_hist(hist, tagSz)
    val tag = ((unhashed_idx >> log2Ceil(nRows)) ^ tag_history)(tagSz-1,0)
    (idx, tag)
  }

  // Saturating 3-bit counter update (0..7).
  def inc_ctr(ctr: UInt, taken: Bool): UInt = {
    Mux(!taken, Mux(ctr === 0.U, 0.U, ctr - 1.U),
                Mux(ctr === 7.U, 7.U, ctr + 1.U))
  }

  // Walk every row once after reset, zeroing the table.
  val doing_reset = RegInit(true.B)
  val reset_idx = RegInit(0.U(log2Ceil(nRows).W))
  reset_idx := reset_idx + doing_reset
  when (reset_idx === (nRows-1).U) { doing_reset := false.B }

  class TageEntry extends Bundle {
    val valid = Bool() // TODO: Remove this valid bit
    val tag = UInt(tagSz.W)
    val ctr = UInt(3.W)
  }

  val tageEntrySz = 1 + tagSz + 3

  val (s1_hashed_idx, s1_tag) = compute_tag_and_hash(fetchIdx(io.f1_req_pc), io.f1_req_ghist)

  // Usefulness bits live in their own memories so they can be swept clear
  // independently of the main entry storage.
  val hi_us = SyncReadMem(nRows, Vec(bankWidth, Bool()))
  val lo_us = SyncReadMem(nRows, Vec(bankWidth, Bool()))
  val table = SyncReadMem(nRows, Vec(bankWidth, UInt(tageEntrySz.W)))

  val mems = Seq((f"tage_l$histLength", nRows, bankWidth * tageEntrySz))

  val s2_tag = RegNext(s1_tag)

  // S2: synchronous read results, compared against the registered tag.
  val s2_req_rtage = VecInit(table.read(s1_hashed_idx, io.f1_req_valid).map(_.asTypeOf(new TageEntry)))
  val s2_req_rhius = hi_us.read(s1_hashed_idx, io.f1_req_valid)
  val s2_req_rlous = lo_us.read(s1_hashed_idx, io.f1_req_valid)
  val s2_req_rhits = VecInit(s2_req_rtage.map(e => e.valid && e.tag === s2_tag && !doing_reset))

  for (w <- 0 until bankWidth) {
    // This bit indicates the TAGE table matched here
    io.f3_resp(w).valid := RegNext(s2_req_rhits(w))
    io.f3_resp(w).bits.u := RegNext(Cat(s2_req_rhius(w), s2_req_rlous(w)))
    io.f3_resp(w).bits.ctr := RegNext(s2_req_rtage(w).ctr)
  }

  // Periodic u-bit clearing sweep: the counter's MSB selects whether the
  // high or low usefulness bit is being cleared this pass.
  val clear_u_ctr = RegInit(0.U((log2Ceil(uBitPeriod) + log2Ceil(nRows) + 1).W))
  when (doing_reset) { clear_u_ctr := 1.U } .otherwise { clear_u_ctr := clear_u_ctr + 1.U }

  val doing_clear_u = clear_u_ctr(log2Ceil(uBitPeriod)-1,0) === 0.U
  val doing_clear_u_hi = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 1.U
  val doing_clear_u_lo = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 0.U
  val clear_u_idx = clear_u_ctr >> log2Ceil(uBitPeriod)

  val (update_idx, update_tag) = compute_tag_and_hash(fetchIdx(io.update_pc), io.update_hist)

  val update_wdata = Wire(Vec(bankWidth, new TageEntry))

  // Reset writes take priority over predictor updates.
  table.write(
    Mux(doing_reset, reset_idx , update_idx),
    Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 0.U(tageEntrySz.W) }), VecInit(update_wdata.map(_.asUInt))),
    Mux(doing_reset, ~(0.U(bankWidth.W)) , io.update_mask.asUInt).asBools
  )

  val update_hi_wdata = Wire(Vec(bankWidth, Bool()))
  hi_us.write(
    Mux(doing_reset, reset_idx, Mux(doing_clear_u_hi, clear_u_idx, update_idx)),
    Mux(doing_reset || doing_clear_u_hi, VecInit((0.U(bankWidth.W)).asBools), update_hi_wdata),
    Mux(doing_reset || doing_clear_u_hi, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools
  )

  val update_lo_wdata = Wire(Vec(bankWidth, Bool()))
  lo_us.write(
    Mux(doing_reset, reset_idx, Mux(doing_clear_u_lo, clear_u_idx, update_idx)),
    Mux(doing_reset || doing_clear_u_lo, VecInit((0.U(bankWidth.W)).asBools), update_lo_wdata),
    Mux(doing_reset || doing_clear_u_lo, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools
  )

  // Small write-bypass CAM so back-to-back updates to the same (idx, tag)
  // see the just-written counter values instead of stale memory contents.
  val wrbypass_tags = Reg(Vec(nWrBypassEntries, UInt(tagSz.W)))
  val wrbypass_idxs = Reg(Vec(nWrBypassEntries, UInt(log2Ceil(nRows).W)))
  val wrbypass = Reg(Vec(nWrBypassEntries, Vec(bankWidth, UInt(3.W))))
  val wrbypass_enq_idx = RegInit(0.U(log2Ceil(nWrBypassEntries).W))

  val wrbypass_hits = VecInit((0 until nWrBypassEntries) map { i =>
    !doing_reset &&
    wrbypass_tags(i) === update_tag &&
    wrbypass_idxs(i) === update_idx
  })
  val wrbypass_hit = wrbypass_hits.reduce(_||_)
  val wrbypass_hit_idx = PriorityEncoder(wrbypass_hits)

  for (w <- 0 until bankWidth) {
    // New allocations start weakly taken (4) or weakly not-taken (3).
    update_wdata(w).ctr := Mux(io.update_alloc(w),
      Mux(io.update_taken(w), 4.U,
                              3.U
      ),
      Mux(wrbypass_hit, inc_ctr(wrbypass(wrbypass_hit_idx)(w), io.update_taken(w)),
                        inc_ctr(io.update_old_ctr(w), io.update_taken(w))
      )
    )
    update_wdata(w).valid := true.B
    update_wdata(w).tag := update_tag

    update_hi_wdata(w) := io.update_u(w)(1)
    update_lo_wdata(w) := io.update_u(w)(0)
  }

  // Maintain the bypass CAM on every update: refresh a hit in place,
  // otherwise enqueue round-robin.
  when (io.update_mask.reduce(_||_)) {
    when (wrbypass_hits.reduce(_||_)) {
      wrbypass(wrbypass_hit_idx) := VecInit(update_wdata.map(_.ctr))
    } .otherwise {
      wrbypass (wrbypass_enq_idx) := VecInit(update_wdata.map(_.ctr))
      wrbypass_tags(wrbypass_enq_idx) := update_tag
      wrbypass_idxs(wrbypass_enq_idx) := update_idx
      wrbypass_enq_idx := WrapInc(wrbypass_enq_idx, nWrBypassEntries)
    }
  }
}
/** Configuration for the TAGE predictor bank.
  * Each tableInfo entry is (nRows, histLength, tagSz) for one table;
  * history lengths grow geometrically across tables.
  */
case class BoomTageParams(
  // nSets, histLen, tagSz
  tableInfo: Seq[Tuple3[Int, Int, Int]] = Seq(( 128, 2, 7),
                                              ( 128, 4, 7),
                                              ( 256, 8, 8),
                                              ( 256, 16, 8),
                                              ( 128, 32, 9),
                                              ( 128, 64, 9)),
  uBitPeriod: Int = 2048
)
/** TAGE predictor bank: a set of TageTable instances plus the provider /
  * alternate-prediction selection, usefulness maintenance, and allocation
  * logic.  Per-prediction state needed at update time travels through
  * TageMeta packed into io.f3_meta.
  */
class TageBranchPredictorBank(params: BoomTageParams = BoomTageParams())(implicit p: Parameters) extends BranchPredictorBank()(p)
{
  val tageUBitPeriod = params.uBitPeriod
  val tageNTables = params.tableInfo.size

  // Metadata recorded at prediction time and replayed at update time.
  class TageMeta extends Bundle
  {
    val provider = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
    val alt_differs = Vec(bankWidth, Output(Bool()))
    val provider_u = Vec(bankWidth, Output(UInt(2.W)))
    val provider_ctr = Vec(bankWidth, Output(UInt(3.W)))
    val allocate = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
  }

  val f3_meta = Wire(new TageMeta)
  override val metaSz = f3_meta.asUInt.getWidth
  require(metaSz <= bpdMaxMetaLength)

  // Saturating 2-bit usefulness update: unchanged if alt agreed, decremented
  // on mispredict, incremented on correct predict.
  def inc_u(u: UInt, alt_differs: Bool, mispredict: Bool): UInt = {
    Mux(!alt_differs, u,
    Mux(mispredict, Mux(u === 0.U, 0.U, u - 1.U),
                    Mux(u === 3.U, 3.U, u + 1.U)))
  }

  val tt = params.tableInfo map {
    case (n, l, s) => {
      val t = Module(new TageTable(n, s, l, params.uBitPeriod))
      t.io.f1_req_valid := RegNext(io.f0_valid)
      t.io.f1_req_pc := RegNext(io.f0_pc)
      t.io.f1_req_ghist := io.f1_ghist
      (t, t.mems)
    }
  }
  val tables = tt.map(_._1)
  val mems = tt.map(_._2).flatten

  val f3_resps = VecInit(tables.map(_.io.f3_resp))

  val s1_update_meta = s1_update.bits.meta.asTypeOf(new TageMeta)
  val s1_update_mispredict_mask = UIntToOH(s1_update.bits.cfi_idx.bits) &
    Fill(bankWidth, s1_update.bits.cfi_mispredicted)

  // Per-table, per-lane update payloads; defaults are don't-care since the
  // masks gate which lanes are actually written.
  val s1_update_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, Bool()))))
  val s1_update_u_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, UInt(1.W)))))

  val s1_update_taken = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
  val s1_update_old_ctr = Wire(Vec(tageNTables, Vec(bankWidth, UInt(3.W))))
  val s1_update_alloc = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
  val s1_update_u = Wire(Vec(tageNTables, Vec(bankWidth, UInt(2.W))))

  s1_update_taken := DontCare
  s1_update_old_ctr := DontCare
  s1_update_alloc := DontCare
  s1_update_u := DontCare

  for (w <- 0 until bankWidth) {
    // Scan tables from shortest to longest history; the last hitting table
    // becomes the provider, the previous prediction becomes the alternate.
    var altpred = io.resp_in(0).f3(w).taken
    val final_altpred = WireInit(io.resp_in(0).f3(w).taken)
    var provided = false.B
    var provider = 0.U
    io.resp.f3(w).taken := io.resp_in(0).f3(w).taken

    for (i <- 0 until tageNTables) {
      val hit = f3_resps(i)(w).valid
      val ctr = f3_resps(i)(w).bits.ctr
      when (hit) {
        // Weak counters (3/4) defer to the alternate prediction.
        io.resp.f3(w).taken := Mux(ctr === 3.U || ctr === 4.U, altpred, ctr(2))
        final_altpred := altpred
      }

      provided = provided || hit
      provider = Mux(hit, i.U, provider)
      altpred = Mux(hit, f3_resps(i)(w).bits.ctr(2), altpred)
    }
    f3_meta.provider(w).valid := provided
    f3_meta.provider(w).bits := provider
    f3_meta.alt_differs(w) := final_altpred =/= io.resp.f3(w).taken
    f3_meta.provider_u(w) := f3_resps(provider)(w).bits.u
    f3_meta.provider_ctr(w) := f3_resps(provider)(w).bits.ctr

    // Create a mask of tables which did not hit our query, and also contain useless entries
    // and also uses a longer history than the provider
    val allocatable_slots = (
      VecInit(f3_resps.map(r => !r(w).valid && r(w).bits.u === 0.U)).asUInt &
      ~(MaskLower(UIntToOH(provider)) & Fill(tageNTables, provided))
    )
    // Randomize the allocation choice to avoid pathological ping-ponging.
    val alloc_lfsr = random.LFSR(tageNTables max 2)

    val first_entry = PriorityEncoder(allocatable_slots)
    val masked_entry = PriorityEncoder(allocatable_slots & alloc_lfsr)
    val alloc_entry = Mux(allocatable_slots(masked_entry),
      masked_entry,
      first_entry)

    f3_meta.allocate(w).valid := allocatable_slots =/= 0.U
    f3_meta.allocate(w).bits := alloc_entry

    val update_was_taken = (s1_update.bits.cfi_idx.valid &&
                            (s1_update.bits.cfi_idx.bits === w.U) &&
                            s1_update.bits.cfi_taken)
    // On commit, train the provider table and bump its usefulness.
    when (s1_update.bits.br_mask(w) && s1_update.valid && s1_update.bits.is_commit_update) {
      when (s1_update_meta.provider(w).valid) {
        val provider = s1_update_meta.provider(w).bits

        s1_update_mask(provider)(w) := true.B
        s1_update_u_mask(provider)(w) := true.B

        val new_u = inc_u(s1_update_meta.provider_u(w),
                          s1_update_meta.alt_differs(w),
                          s1_update_mispredict_mask(w))
        s1_update_u (provider)(w) := new_u
        s1_update_taken (provider)(w) := update_was_taken
        s1_update_old_ctr(provider)(w) := s1_update_meta.provider_ctr(w)
        s1_update_alloc (provider)(w) := false.B
      }
    }
  }

  // On a committed mispredict, allocate into a longer-history table; if no
  // slot was allocatable, age the u-bits of the candidate tables instead.
  when (s1_update.valid && s1_update.bits.is_commit_update && s1_update.bits.cfi_mispredicted && s1_update.bits.cfi_idx.valid) {
    val idx = s1_update.bits.cfi_idx.bits
    val allocate = s1_update_meta.allocate(idx)
    when (allocate.valid) {
      s1_update_mask (allocate.bits)(idx) := true.B
      s1_update_taken(allocate.bits)(idx) := s1_update.bits.cfi_taken
      s1_update_alloc(allocate.bits)(idx) := true.B

      s1_update_u_mask(allocate.bits)(idx) := true.B
      s1_update_u (allocate.bits)(idx) := 0.U
    } .otherwise {
      val provider = s1_update_meta.provider(idx)
      val decr_mask = Mux(provider.valid, ~MaskLower(UIntToOH(provider.bits)), 0.U)

      for (i <- 0 until tageNTables) {
        when (decr_mask(i)) {
          s1_update_u_mask(i)(idx) := true.B
          s1_update_u (i)(idx) := 0.U
        }
      }
    }
  }

  // Register the update payloads into each table (one-cycle update pipeline).
  for (i <- 0 until tageNTables) {
    for (w <- 0 until bankWidth) {
      tables(i).io.update_mask(w) := RegNext(s1_update_mask(i)(w))
      tables(i).io.update_taken(w) := RegNext(s1_update_taken(i)(w))
      tables(i).io.update_alloc(w) := RegNext(s1_update_alloc(i)(w))
      tables(i).io.update_old_ctr(w) := RegNext(s1_update_old_ctr(i)(w))

      tables(i).io.update_u_mask(w) := RegNext(s1_update_u_mask(i)(w))
      tables(i).io.update_u(w) := RegNext(s1_update_u(i)(w))
    }
    tables(i).io.update_pc := RegNext(s1_update.bits.pc)
    tables(i).io.update_hist := RegNext(s1_update.bits.ghist)
  }

  //io.f3_meta := Cat(f3_meta.asUInt, micro.io.f3_meta(micro.metaSz-1,0), base.io.f3_meta(base.metaSz-1, 0))
  io.f3_meta := f3_meta.asUInt
} | module table_3(
  // TAGE table storage wrapper: one read port, one always-enabled masked
  // write port; actual storage is the shared extmodule table_1_ext.
  input [6:0] R0_addr,
  input R0_en,
  input R0_clk,
  output [51:0] R0_data,
  input [6:0] W0_addr,
  input W0_clk,
  input [51:0] W0_data,
  input [3:0] W0_mask
);
  table_1_ext table_1_ext (
    .R0_addr (R0_addr),
    .R0_en (R0_en),
    .R0_clk (R0_clk),
    .R0_data (R0_data),
    .W0_addr (W0_addr),
    .W0_en (1'h1), // write port is gated by W0_mask only
    .W0_clk (W0_clk),
    .W0_data (W0_data),
    .W0_mask (W0_mask)
  );
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.jtag for license details.
package freechips.rocketchip.jtag
import chisel3._
import chisel3.reflect.DataMirror
import chisel3.internal.firrtl.KnownWidth
import chisel3.util.{Cat, Valid}
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util.property
/** Base JTAG shifter IO, viewed from input to shift register chain.
* Can be chained together.
*/
class ShifterIO extends Bundle {
  val shift = Bool() // advance the scan chain on clock high
  val data = Bool() // as input: bit to be captured into shifter MSB on next rising edge; as output: value of shifter LSB
  val capture = Bool() // high in the CaptureIR/DR state when this chain is selected
  val update = Bool() // high in the UpdateIR/DR state when this chain is selected

  /** Sets an output shifter IO's control signals from an input shifter IO's
    * control signals (data is deliberately not forwarded).
    */
  def chainControlFrom(in: ShifterIO): Unit = {
    shift := in.shift
    capture := in.capture
    update := in.update
  }
}
/** Base IO for chainable JTAG shift-register stages: TDI-side in, TDO-side out. */
trait ChainIO extends Bundle {
  val chainIn = Input(new ShifterIO)
  val chainOut = Output(new ShifterIO)
}
/** Parallel-capture port: 'bits' is sampled into the shifter while 'capture'
  * pulses high. */
class Capture[+T <: Data](gen: T) extends Bundle {
  val bits = Input(gen) // data to capture, should be always valid
  val capture = Output(Bool()) // will be high in capture state (single cycle), captured on following rising edge
}

object Capture {
  // Factory mirroring the Bundle-companion idiom.
  def apply[T <: Data](gen: T): Capture[T] = new Capture(gen)
}
/** Trait that all JTAG chains (data and instruction registers) must extend, providing basic chain
* IO.
*/
trait Chain extends Module {
  // Concrete chains refine this with capture/update ports on top of ChainIO.
  val io: ChainIO
}
/** One-element shift register, data register for bypass mode.
*
* Implements Clause 10.
*/
/** One-element shift register used as the mandatory JTAG bypass data register
  * (IEEE 1149.1 Clause 10). */
class JtagBypassChain(implicit val p: Parameters) extends Chain {
  class ModIO extends ChainIO
  val io = IO(new ModIO)
  io.chainOut chainControlFrom io.chainIn

  val reg = Reg(Bool()) // 10.1.1a single shift register stage

  io.chainOut.data := reg

  property.cover(io.chainIn.capture, "bypass_chain_capture", "JTAG; bypass_chain_capture; This Bypass Chain captured data")

  when (io.chainIn.capture) {
    reg := false.B // 10.1.1b capture logic 0 on TCK rising
  } .elsewhen (io.chainIn.shift) {
    reg := io.chainIn.data
  }
  // Capture/shift/update are mutually exclusive by TAP-controller construction.
  assert(!(io.chainIn.capture && io.chainIn.update)
    && !(io.chainIn.capture && io.chainIn.shift)
    && !(io.chainIn.update && io.chainIn.shift))
}
object JtagBypassChain {
  // Factory for the bypass register chain.
  def apply()(implicit p: Parameters) = new JtagBypassChain
}
/** Simple shift register with parallel capture only, for read-only data registers.
*
* Number of stages is the number of bits in gen, which must have a known width.
*
* Useful notes:
* 7.2.1c shifter shifts on TCK rising edge
* 4.3.2a TDI captured on TCK rising edge, 6.1.2.1b assumed changes on TCK falling edge
*/
/** Read-only JTAG scan chain: parallel-captures 'gen' then shifts it out. */
class CaptureChain[+T <: Data](gen: T)(implicit val p: Parameters) extends Chain {
  override def desiredName = s"CaptureChain_${gen.typeName}"
  class ModIO extends ChainIO {
    val capture = Capture(gen)
  }
  val io = IO(new ModIO)
  io.chainOut chainControlFrom io.chainIn

  // Chain length comes from gen's width, which must be known at elaboration.
  val n = DataMirror.widthOf(gen) match {
    case KnownWidth(x) => x
    case _ => require(false, s"can't generate chain for unknown width data type $gen"); -1 // TODO: remove -1 type hack
  }

  val regs = (0 until n) map (x => Reg(Bool()))

  io.chainOut.data := regs(0)

  property.cover(io.chainIn.capture, "chain_capture", "JTAG; chain_capture; This Chain captured data")

  when (io.chainIn.capture) {
    (0 until n) map (x => regs(x) := io.capture.bits.asUInt(x))
    io.capture.capture := true.B
  } .elsewhen (io.chainIn.shift) {
    // Shift toward bit 0 (TDO side); new TDI bit enters at the MSB.
    regs(n-1) := io.chainIn.data
    (0 until n-1) map (x => regs(x) := regs(x+1))
    io.capture.capture := false.B
  } .otherwise {
    io.capture.capture := false.B
  }

  assert(!(io.chainIn.capture && io.chainIn.update)
    && !(io.chainIn.capture && io.chainIn.shift)
    && !(io.chainIn.update && io.chainIn.shift))
}
object CaptureChain {
  // Factory for a capture-only chain of the given data type.
  def apply[T <: Data](gen: T)(implicit p: Parameters) = new CaptureChain(gen)
}
/** Simple shift register with parallel capture and update. Useful for general instruction and data
* scan registers.
*
* Number of stages is the max number of bits in genCapture and genUpdate, both of which must have
* known widths. If there is a width mismatch, the unused most significant bits will be zero.
*
* Useful notes:
* 7.2.1c shifter shifts on TCK rising edge
* 4.3.2a TDI captured on TCK rising edge, 6.1.2.1b assumed changes on TCK falling edge
*/
/** General JTAG scan chain with both parallel capture and parallel update;
  * used for instruction and writable data registers.  Length is the max of
  * the capture and update widths; surplus MSBs capture as zero. */
class CaptureUpdateChain[+T <: Data, +V <: Data](genCapture: T, genUpdate: V)(implicit val p: Parameters) extends Chain {
  override def desiredName = s"CaptureUpdateChain_${genCapture.typeName}_To_${genUpdate.typeName}"
  class ModIO extends ChainIO {
    val capture = Capture(genCapture)
    val update = Valid(genUpdate) // valid high when in update state (single cycle), contents may change any time after
  }
  val io = IO(new ModIO)
  io.chainOut chainControlFrom io.chainIn

  val captureWidth = DataMirror.widthOf(genCapture) match {
    case KnownWidth(x) => x
    case _ => require(false, s"can't generate chain for unknown width data type $genCapture"); -1 // TODO: remove -1 type hack
  }
  val updateWidth = DataMirror.widthOf(genUpdate) match {
    case KnownWidth(x) => x
    case _ => require(false, s"can't generate chain for unknown width data type $genUpdate"); -1 // TODO: remove -1 type hack
  }
  val n = math.max(captureWidth, updateWidth)

  val regs = (0 until n) map (x => Reg(Bool()))

  io.chainOut.data := regs(0)

  // Update value is the low updateWidth bits of the shifter, continuously
  // visible; io.update.valid qualifies when it is actually latched.
  val updateBits = Cat(regs.reverse)(updateWidth-1, 0)
  io.update.bits := updateBits.asTypeOf(io.update.bits)

  val captureBits = io.capture.bits.asUInt

  property.cover(io.chainIn.capture, "chain_capture", "JTAG;chain_capture; This Chain captured data")
  property.cover(io.chainIn.capture, "chain_update", "JTAG;chain_update; This Chain updated data")

  when (io.chainIn.capture) {
    (0 until math.min(n, captureWidth)) map (x => regs(x) := captureBits(x))
    (captureWidth until n) map (x => regs(x) := 0.U)
    io.capture.capture := true.B
    io.update.valid := false.B
  } .elsewhen (io.chainIn.update) {
    io.capture.capture := false.B
    io.update.valid := true.B
  } .elsewhen (io.chainIn.shift) {
    // Shift toward bit 0 (TDO side); new TDI bit enters at the MSB.
    regs(n-1) := io.chainIn.data
    (0 until n-1) map (x => regs(x) := regs(x+1))
    io.capture.capture := false.B
    io.update.valid := false.B
  } .otherwise {
    io.capture.capture := false.B
    io.update.valid := false.B
  }

  assert(!(io.chainIn.capture && io.chainIn.update)
    && !(io.chainIn.capture && io.chainIn.shift)
    && !(io.chainIn.update && io.chainIn.shift))
}
object CaptureUpdateChain {
  /** Capture-update chain with matching capture and update types.
    */
  def apply[T <: Data](gen: T)(implicit p: Parameters) = new CaptureUpdateChain(gen, gen)
  /** Capture-update chain with distinct capture and update types. */
  def apply[T <: Data, V <: Data](genCapture: T, genUpdate: V)(implicit p: Parameters) =
    new CaptureUpdateChain(genCapture, genUpdate)
} | module CaptureUpdateChain_UInt5_To_UInt5(
  // 5-bit capture/update scan chain.  Capture loads a constant (bit 0 = 1,
  // rest 0); shift moves data toward bit 0 (TDO side) with TDI entering at
  // bit 4; update exposes the register contents on io_update_bits.
  input clock,
  input reset,
  input io_chainIn_shift,
  input io_chainIn_data,
  input io_chainIn_capture,
  input io_chainIn_update,
  output io_chainOut_data,
  output [4:0] io_update_bits
);
  reg regs_0;
  reg regs_1;
  reg regs_2;
  reg regs_3;
  reg regs_4;
  always @(posedge clock) begin
    if (io_chainIn_capture) begin
      regs_0 <= 1'h1;
      regs_1 <= 1'h0;
      regs_2 <= 1'h0;
      regs_3 <= 1'h0;
      regs_4 <= 1'h0;
    end
    else if (io_chainIn_update | ~io_chainIn_shift) begin
      // hold: update state and idle leave the shifter unchanged
    end
    else begin
      regs_0 <= regs_1;
      regs_1 <= regs_2;
      regs_2 <= regs_3;
      regs_3 <= regs_4;
      regs_4 <= io_chainIn_data;
    end
  end
  assign io_chainOut_data = regs_0;
  assign io_update_bits = {regs_4, regs_3, regs_2, regs_1, regs_0};
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
object DescribedSRAM {
  /** Creates a SyncReadMem named 'name' and annotates it with SRAM metadata
    * (address/data widths, depth, description, write-mask granularity) for
    * downstream memory-mapping tools.
    */
  def apply[T <: Data](
    name: String,
    desc: String,
    size: BigInt, // depth
    data: T
  ): SyncReadMem[T] = {
    val mem = SyncReadMem(size, data)
    mem.suggestName(name)
    // Mask granularity is the element width for vectors, else the full width.
    val granWidth = data match {
      case v: Vec[_] => v.head.getWidth
      case d => d.getWidth
    }
    val uid = 0
    Annotated.srams(
      component = mem,
      name = name,
      address_width = log2Ceil(size),
      data_width = data.getWidth,
      depth = size,
      description = desc,
      write_mask_granularity = granWidth
    )
    mem
  }
} | module cc_banks_3(
  // Cache data bank wrapper: single shared read/write port; storage is the
  // shared extmodule cc_banks_0_ext.
  input [13:0] RW0_addr,
  input RW0_en,
  input RW0_clk,
  input RW0_wmode,
  input [31:0] RW0_wdata,
  output [31:0] RW0_rdata
);
  cc_banks_0_ext cc_banks_0_ext (
    .RW0_addr (RW0_addr),
    .RW0_en (RW0_en),
    .RW0_clk (RW0_clk),
    .RW0_wmode (RW0_wmode),
    .RW0_wdata (RW0_wdata),
    .RW0_rdata (RW0_rdata)
  );
endmodule |
Generate the Verilog code corresponding to this Chisel code package testchipip.serdes
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config._
/** Serializes a value of type t into ceil(width/flitWidth) flit beats,
  * least-significant flit first.  Beat 0 is forwarded combinationally from
  * io.in; the remaining beats replay the registered copy. */
class GenericSerializer[T <: Data](t: T, flitWidth: Int) extends Module {
  override def desiredName = s"GenericSerializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(t))
    val out = Decoupled(new Flit(flitWidth))
    val busy = Output(Bool())
  })
  val dataBits = t.getWidth.max(flitWidth)
  val dataBeats = (dataBits - 1) / flitWidth + 1
  require(dataBeats >= 1)
  val data = Reg(Vec(dataBeats, UInt(flitWidth.W)))
  val beat = RegInit(0.U(log2Ceil(dataBeats).W))

  // Accept a new word only at beat 0; stay valid while draining the rest.
  io.in.ready := io.out.ready && beat === 0.U
  io.out.valid := io.in.valid || beat =/= 0.U
  io.out.bits.flit := Mux(beat === 0.U, io.in.bits.asUInt, data(beat))
  when (io.out.fire) {
    beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
    when (beat === 0.U) {
      data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(flitWidth.W)))
      data(0) := DontCare // unused, DCE this
    }
  }

  io.busy := io.out.valid
}
/** Reassembles dataBeats flits (least-significant first) into one value of
  * type t; the output fires combinationally on the final beat. */
class GenericDeserializer[T <: Data](t: T, flitWidth: Int) extends Module {
  override def desiredName = s"GenericDeserializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(new Flit(flitWidth)))
    val out = Decoupled(t)
    val busy = Output(Bool())
  })
  val dataBits = t.getWidth.max(flitWidth)
  val dataBeats = (dataBits - 1) / flitWidth + 1
  require(dataBeats >= 1)
  // Only the first dataBeats-1 flits are registered; the last flit is
  // concatenated straight from io.in on the output beat.
  val data = Reg(Vec(dataBeats-1, UInt(flitWidth.W)))
  val beat = RegInit(0.U(log2Ceil(dataBeats).W))

  io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
  io.out.valid := io.in.valid && beat === (dataBeats-1).U
  io.out.bits := (if (dataBeats == 1) {
    io.in.bits.flit.asTypeOf(t)
  } else {
    Cat(io.in.bits.flit, data.asUInt).asTypeOf(t)
  })

  when (io.in.fire) {
    beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
    if (dataBeats > 1) {
      when (beat =/= (dataBeats-1).U) {
        data(beat(log2Ceil(dataBeats-1)-1,0)) := io.in.bits.flit
      }
    }
  }

  io.busy := beat =/= 0.U
}
/** Splits one flit into ceil(flitWidth/phitWidth) phits, least-significant
  * phit first. */
class FlitToPhit(flitWidth: Int, phitWidth: Int) extends Module {
  override def desiredName = s"FlitToPhit_f${flitWidth}_p${phitWidth}"
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(new Flit(flitWidth)))
    val out = Decoupled(new Phit(phitWidth))
  })
  require(flitWidth >= phitWidth)

  val dataBeats = (flitWidth - 1) / phitWidth + 1
  val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
  val beat = RegInit(0.U(log2Ceil(dataBeats).W))

  io.in.ready := io.out.ready && beat === 0.U
  io.out.valid := io.in.valid || beat =/= 0.U
  // Beat 0 comes straight from the input; later beats replay the saved tail.
  io.out.bits.phit := (if (dataBeats == 1) io.in.bits.flit else Mux(beat === 0.U, io.in.bits.flit, data(beat-1.U)))

  when (io.out.fire) {
    beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
    when (beat === 0.U) {
      data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(phitWidth.W))).tail
    }
  }
}
object FlitToPhit {
  /** Convenience wrapper: hook a decoupled flit stream to a new converter and
    * return its phit output. */
  def apply(flit: DecoupledIO[Flit], phitWidth: Int): DecoupledIO[Phit] = {
    val flit2phit = Module(new FlitToPhit(flit.bits.flitWidth, phitWidth))
    flit2phit.io.in <> flit
    flit2phit.io.out
  }
}
/** Reassembles ceil(flitWidth/phitWidth) phits (least-significant first)
  * into one flit; inverse of FlitToPhit. */
class PhitToFlit(flitWidth: Int, phitWidth: Int) extends Module {
  override def desiredName = s"PhitToFlit_p${phitWidth}_f${flitWidth}"
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(new Phit(phitWidth)))
    val out = Decoupled(new Flit(flitWidth))
  })
  require(flitWidth >= phitWidth)

  val dataBeats = (flitWidth - 1) / phitWidth + 1
  val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
  val beat = RegInit(0.U(log2Ceil(dataBeats).W))

  io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
  io.out.valid := io.in.valid && beat === (dataBeats-1).U
  // The last phit arrives combinationally; earlier phits were latched.
  io.out.bits.flit := (if (dataBeats == 1) io.in.bits.phit else Cat(io.in.bits.phit, data.asUInt))

  when (io.in.fire) {
    beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
    if (dataBeats > 1) {
      when (beat =/= (dataBeats-1).U) {
        data(beat) := io.in.bits.phit
      }
    }
  }
}
object PhitToFlit {
  /** Decoupled variant: returns the converter's flit output. */
  def apply(phit: DecoupledIO[Phit], flitWidth: Int): DecoupledIO[Flit] = {
    val phit2flit = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
    phit2flit.io.in <> phit
    phit2flit.io.out
  }
  /** Valid-only variant: a ValidIO producer cannot be back-pressured, so the
    * converter must always be ready (checked by assert). */
  def apply(phit: ValidIO[Phit], flitWidth: Int): ValidIO[Flit] = {
    val phit2flit = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
    phit2flit.io.in.valid := phit.valid
    phit2flit.io.in.bits := phit.bits
    when (phit.valid) { assert(phit2flit.io.in.ready) }
    val out = Wire(Valid(new Flit(flitWidth)))
    out.valid := phit2flit.io.out.valid
    out.bits := phit2flit.io.out.bits
    phit2flit.io.out.ready := true.B
    out
  }
}
/** Time-multiplexes several phit channels onto one link, sending a
  * channel-id header (headerBeats phits) before each flit's payload phits. */
class PhitArbiter(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
  override def desiredName = s"PhitArbiter_p${phitWidth}_f${flitWidth}_n${channels}"
  val io = IO(new Bundle {
    val in = Flipped(Vec(channels, Decoupled(new Phit(phitWidth))))
    val out = Decoupled(new Phit(phitWidth))
  })
  if (channels == 1) {
    io.out <> io.in(0)
  } else {
    val headerWidth = log2Ceil(channels)
    val headerBeats = (headerWidth - 1) / phitWidth + 1
    val flitBeats = (flitWidth - 1) / phitWidth + 1
    val beats = headerBeats + flitBeats
    val beat = RegInit(0.U(log2Ceil(beats).W))
    // Winner is chosen by priority at beat 0, then held for the whole packet.
    val chosen_reg = Reg(UInt(headerWidth.W))
    val chosen_prio = PriorityEncoder(io.in.map(_.valid))
    val chosen = Mux(beat === 0.U, chosen_prio, chosen_reg)
    val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)

    io.out.valid := VecInit(io.in.map(_.valid))(chosen)
    io.out.bits.phit := Mux(beat < headerBeats.U,
      chosen.asTypeOf(Vec(headerBeats, UInt(phitWidth.W)))(header_idx),
      VecInit(io.in.map(_.bits.phit))(chosen))

    // Inputs are only consumed during payload beats of their own packet.
    for (i <- 0 until channels) {
      io.in(i).ready := io.out.ready && beat >= headerBeats.U && chosen_reg === i.U
    }

    when (io.out.fire) {
      beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
      when (beat === 0.U) { chosen_reg := chosen_prio }
    }
  }
}
/** Inverse of PhitArbiter: consumes the channel-id header phits, then routes
  * the following payload phits to the selected output channel. */
class PhitDemux(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
  override def desiredName = s"PhitDemux_p${phitWidth}_f${flitWidth}_n${channels}"
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(new Phit(phitWidth)))
    val out = Vec(channels, Decoupled(new Phit(phitWidth)))
  })
  if (channels == 1) {
    io.out(0) <> io.in
  } else {
    val headerWidth = log2Ceil(channels)
    val headerBeats = (headerWidth - 1) / phitWidth + 1
    val flitBeats = (flitWidth - 1) / phitWidth + 1
    val beats = headerBeats + flitBeats
    val beat = RegInit(0.U(log2Ceil(beats).W))
    val channel_vec = Reg(Vec(headerBeats, UInt(phitWidth.W)))
    val channel = channel_vec.asUInt(log2Ceil(channels)-1,0)
    val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)

    // Header beats are always accepted; payload beats wait for the target.
    io.in.ready := beat < headerBeats.U || VecInit(io.out.map(_.ready))(channel)

    for (c <- 0 until channels) {
      io.out(c).valid := io.in.valid && beat >= headerBeats.U && channel === c.U
      io.out(c).bits.phit := io.in.bits.phit
    }

    when (io.in.fire) {
      beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
      when (beat < headerBeats.U) {
        channel_vec(header_idx) := io.in.bits.phit
      }
    }
  }
}
/** Sender side of a credited link: forwards flits only while outstanding
  * (un-credited) flits stay below bufferSz.  An incoming credit flit's
  * payload encodes (number of returned credits - 1). */
class DecoupledFlitToCreditedFlit(flitWidth: Int, bufferSz: Int) extends Module {
  override def desiredName = s"DecoupledFlitToCreditedFlit_f${flitWidth}_b${bufferSz}"
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(new Flit(flitWidth)))
    val out = Decoupled(new Flit(flitWidth))
    val credit = Flipped(Decoupled(new Flit(flitWidth)))
  })
  val creditWidth = log2Ceil(bufferSz)
  require(creditWidth <= flitWidth)
  // 'credits' counts flits sent but not yet credited back.
  val credits = RegInit(0.U((creditWidth+1).W))
  val credit_incr = io.out.fire
  val credit_decr = io.credit.fire
  when (credit_incr || credit_decr) {
    credits := credits + credit_incr - Mux(io.credit.valid, io.credit.bits.flit +& 1.U, 0.U)
  }

  io.out.valid := io.in.valid && credits < bufferSz.U
  io.out.bits.flit := io.in.bits.flit
  io.in.ready := io.out.ready && credits < bufferSz.U
  io.credit.ready := true.B
}
class CreditedFlitToDecoupledFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"CreditedFlitToDecoupledFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Decoupled(new Flit(flitWidth))
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val buffer = Module(new Queue(new Flit(flitWidth), bufferSz))
val credits = RegInit(0.U((creditWidth+1).W))
val credit_incr = buffer.io.deq.fire
val credit_decr = io.credit.fire
when (credit_incr || credit_decr) {
credits := credit_incr + Mux(credit_decr, 0.U, credits)
}
buffer.io.enq.valid := io.in.valid
buffer.io.enq.bits := io.in.bits
io.in.ready := true.B
when (io.in.valid) { assert(buffer.io.enq.ready) }
io.out <> buffer.io.deq
io.credit.valid := credits =/= 0.U
io.credit.bits.flit := credits - 1.U
} | module GenericDeserializer_TLBeatw67_f32(
input clock,
input reset,
output io_in_ready,
input io_in_valid,
input [31:0] io_in_bits_flit,
input io_out_ready,
output io_out_valid,
output [64:0] io_out_bits_payload,
output io_out_bits_head,
output io_out_bits_tail
);
reg [31:0] data_0;
reg [31:0] data_1;
reg [1:0] beat;
wire io_in_ready_0 = io_out_ready | beat != 2'h2;
wire _beat_T = beat == 2'h2;
wire _GEN = io_in_ready_0 & io_in_valid;
wire _GEN_0 = beat == 2'h2;
always @(posedge clock) begin
if (~_GEN | _GEN_0 | beat[0]) begin
end
else
data_0 <= io_in_bits_flit;
if (~_GEN | _GEN_0 | ~(beat[0])) begin
end
else
data_1 <= io_in_bits_flit;
if (reset)
beat <= 2'h0;
else if (_GEN)
beat <= _beat_T ? 2'h0 : beat + 2'h1;
end
assign io_in_ready = io_in_ready_0;
assign io_out_valid = io_in_valid & _beat_T;
assign io_out_bits_payload = {io_in_bits_flit[2:0], data_1, data_0[31:2]};
assign io_out_bits_head = data_0[1];
assign io_out_bits_tail = data_0[0];
endmodule |
Generate the Verilog code corresponding to this Chisel code //******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Issue Slot Logic
//--------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Note: stores (and AMOs) are "broken down" into 2 uops, but stored within a single issue-slot.
// TODO XXX make a separate issueSlot for MemoryIssueSlots, and only they break apart stores.
// TODO Disable ldspec for FP queue.
package boom.v3.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v3.common._
import boom.v3.util._
import FUConstants._
/**
* IO bundle to interact with Issue slot
*
* @param numWakeupPorts number of wakeup ports for the slot
*/
class IssueSlotIO(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomBundle
{
val valid = Output(Bool())
val will_be_valid = Output(Bool()) // TODO code review, do we need this signal so explicitely?
val request = Output(Bool())
val request_hp = Output(Bool())
val grant = Input(Bool())
val brupdate = Input(new BrUpdateInfo())
val kill = Input(Bool()) // pipeline flush
val clear = Input(Bool()) // entry being moved elsewhere (not mutually exclusive with grant)
val ldspec_miss = Input(Bool()) // Previous cycle's speculative load wakeup was mispredicted.
val wakeup_ports = Flipped(Vec(numWakeupPorts, Valid(new IqWakeup(maxPregSz))))
val pred_wakeup_port = Flipped(Valid(UInt(log2Ceil(ftqSz).W)))
val spec_ld_wakeup = Flipped(Vec(memWidth, Valid(UInt(width=maxPregSz.W))))
val in_uop = Flipped(Valid(new MicroOp())) // if valid, this WILL overwrite an entry!
val out_uop = Output(new MicroOp()) // the updated slot uop; will be shifted upwards in a collasping queue.
val uop = Output(new MicroOp()) // the current Slot's uop. Sent down the pipeline when issued.
val debug = {
val result = new Bundle {
val p1 = Bool()
val p2 = Bool()
val p3 = Bool()
val ppred = Bool()
val state = UInt(width=2.W)
}
Output(result)
}
}
/**
* Single issue slot. Holds a uop within the issue queue
*
* @param numWakeupPorts number of wakeup ports
*/
class IssueSlot(val numWakeupPorts: Int)(implicit p: Parameters)
extends BoomModule
with IssueUnitConstants
{
val io = IO(new IssueSlotIO(numWakeupPorts))
// slot invalid?
// slot is valid, holding 1 uop
// slot is valid, holds 2 uops (like a store)
def is_invalid = state === s_invalid
def is_valid = state =/= s_invalid
val next_state = Wire(UInt()) // the next state of this slot (which might then get moved to a new slot)
val next_uopc = Wire(UInt()) // the next uopc of this slot (which might then get moved to a new slot)
val next_lrs1_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot)
val next_lrs2_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot)
val state = RegInit(s_invalid)
val p1 = RegInit(false.B)
val p2 = RegInit(false.B)
val p3 = RegInit(false.B)
val ppred = RegInit(false.B)
// Poison if woken up by speculative load.
// Poison lasts 1 cycle (as ldMiss will come on the next cycle).
// SO if poisoned is true, set it to false!
val p1_poisoned = RegInit(false.B)
val p2_poisoned = RegInit(false.B)
p1_poisoned := false.B
p2_poisoned := false.B
val next_p1_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p1_poisoned, p1_poisoned)
val next_p2_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p2_poisoned, p2_poisoned)
val slot_uop = RegInit(NullMicroOp)
val next_uop = Mux(io.in_uop.valid, io.in_uop.bits, slot_uop)
//-----------------------------------------------------------------------------
// next slot state computation
// compute the next state for THIS entry slot (in a collasping queue, the
// current uop may get moved elsewhere, and a new uop can enter
when (io.kill) {
state := s_invalid
} .elsewhen (io.in_uop.valid) {
state := io.in_uop.bits.iw_state
} .elsewhen (io.clear) {
state := s_invalid
} .otherwise {
state := next_state
}
//-----------------------------------------------------------------------------
// "update" state
// compute the next state for the micro-op in this slot. This micro-op may
// be moved elsewhere, so the "next_state" travels with it.
// defaults
next_state := state
next_uopc := slot_uop.uopc
next_lrs1_rtype := slot_uop.lrs1_rtype
next_lrs2_rtype := slot_uop.lrs2_rtype
when (io.kill) {
next_state := s_invalid
} .elsewhen ((io.grant && (state === s_valid_1)) ||
(io.grant && (state === s_valid_2) && p1 && p2 && ppred)) {
// try to issue this uop.
when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) {
next_state := s_invalid
}
} .elsewhen (io.grant && (state === s_valid_2)) {
when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) {
next_state := s_valid_1
when (p1) {
slot_uop.uopc := uopSTD
next_uopc := uopSTD
slot_uop.lrs1_rtype := RT_X
next_lrs1_rtype := RT_X
} .otherwise {
slot_uop.lrs2_rtype := RT_X
next_lrs2_rtype := RT_X
}
}
}
when (io.in_uop.valid) {
slot_uop := io.in_uop.bits
assert (is_invalid || io.clear || io.kill, "trying to overwrite a valid issue slot.")
}
// Wakeup Compare Logic
// these signals are the "next_p*" for the current slot's micro-op.
// they are important for shifting the current slot_uop up to an other entry.
val next_p1 = WireInit(p1)
val next_p2 = WireInit(p2)
val next_p3 = WireInit(p3)
val next_ppred = WireInit(ppred)
when (io.in_uop.valid) {
p1 := !(io.in_uop.bits.prs1_busy)
p2 := !(io.in_uop.bits.prs2_busy)
p3 := !(io.in_uop.bits.prs3_busy)
ppred := !(io.in_uop.bits.ppred_busy)
}
when (io.ldspec_miss && next_p1_poisoned) {
assert(next_uop.prs1 =/= 0.U, "Poison bit can't be set for prs1=x0!")
p1 := false.B
}
when (io.ldspec_miss && next_p2_poisoned) {
assert(next_uop.prs2 =/= 0.U, "Poison bit can't be set for prs2=x0!")
p2 := false.B
}
for (i <- 0 until numWakeupPorts) {
when (io.wakeup_ports(i).valid &&
(io.wakeup_ports(i).bits.pdst === next_uop.prs1)) {
p1 := true.B
}
when (io.wakeup_ports(i).valid &&
(io.wakeup_ports(i).bits.pdst === next_uop.prs2)) {
p2 := true.B
}
when (io.wakeup_ports(i).valid &&
(io.wakeup_ports(i).bits.pdst === next_uop.prs3)) {
p3 := true.B
}
}
when (io.pred_wakeup_port.valid && io.pred_wakeup_port.bits === next_uop.ppred) {
ppred := true.B
}
for (w <- 0 until memWidth) {
assert (!(io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === 0.U),
"Loads to x0 should never speculatively wakeup other instructions")
}
// TODO disable if FP IQ.
for (w <- 0 until memWidth) {
when (io.spec_ld_wakeup(w).valid &&
io.spec_ld_wakeup(w).bits === next_uop.prs1 &&
next_uop.lrs1_rtype === RT_FIX) {
p1 := true.B
p1_poisoned := true.B
assert (!next_p1_poisoned)
}
when (io.spec_ld_wakeup(w).valid &&
io.spec_ld_wakeup(w).bits === next_uop.prs2 &&
next_uop.lrs2_rtype === RT_FIX) {
p2 := true.B
p2_poisoned := true.B
assert (!next_p2_poisoned)
}
}
// Handle branch misspeculations
val next_br_mask = GetNewBrMask(io.brupdate, slot_uop)
// was this micro-op killed by a branch? if yes, we can't let it be valid if
// we compact it into an other entry
when (IsKilledByBranch(io.brupdate, slot_uop)) {
next_state := s_invalid
}
when (!io.in_uop.valid) {
slot_uop.br_mask := next_br_mask
}
//-------------------------------------------------------------
// Request Logic
io.request := is_valid && p1 && p2 && p3 && ppred && !io.kill
val high_priority = slot_uop.is_br || slot_uop.is_jal || slot_uop.is_jalr
io.request_hp := io.request && high_priority
when (state === s_valid_1) {
io.request := p1 && p2 && p3 && ppred && !io.kill
} .elsewhen (state === s_valid_2) {
io.request := (p1 || p2) && ppred && !io.kill
} .otherwise {
io.request := false.B
}
//assign outputs
io.valid := is_valid
io.uop := slot_uop
io.uop.iw_p1_poisoned := p1_poisoned
io.uop.iw_p2_poisoned := p2_poisoned
// micro-op will vacate due to grant.
val may_vacate = io.grant && ((state === s_valid_1) || (state === s_valid_2) && p1 && p2 && ppred)
val squash_grant = io.ldspec_miss && (p1_poisoned || p2_poisoned)
io.will_be_valid := is_valid && !(may_vacate && !squash_grant)
io.out_uop := slot_uop
io.out_uop.iw_state := next_state
io.out_uop.uopc := next_uopc
io.out_uop.lrs1_rtype := next_lrs1_rtype
io.out_uop.lrs2_rtype := next_lrs2_rtype
io.out_uop.br_mask := next_br_mask
io.out_uop.prs1_busy := !p1
io.out_uop.prs2_busy := !p2
io.out_uop.prs3_busy := !p3
io.out_uop.ppred_busy := !ppred
io.out_uop.iw_p1_poisoned := p1_poisoned
io.out_uop.iw_p2_poisoned := p2_poisoned
when (state === s_valid_2) {
when (p1 && p2 && ppred) {
; // send out the entire instruction as one uop
} .elsewhen (p1 && ppred) {
io.uop.uopc := slot_uop.uopc
io.uop.lrs2_rtype := RT_X
} .elsewhen (p2 && ppred) {
io.uop.uopc := uopSTD
io.uop.lrs1_rtype := RT_X
}
}
// debug outputs
io.debug.p1 := p1
io.debug.p2 := p2
io.debug.p3 := p3
io.debug.ppred := ppred
io.debug.state := state
} | module IssueSlot_8(
input clock,
input reset,
output io_valid,
output io_will_be_valid,
output io_request,
input io_grant,
input [7:0] io_brupdate_b1_resolve_mask,
input [7:0] io_brupdate_b1_mispredict_mask,
input io_kill,
input io_clear,
input io_ldspec_miss,
input io_wakeup_ports_0_valid,
input [5:0] io_wakeup_ports_0_bits_pdst,
input io_wakeup_ports_1_valid,
input [5:0] io_wakeup_ports_1_bits_pdst,
input io_wakeup_ports_2_valid,
input [5:0] io_wakeup_ports_2_bits_pdst,
input io_spec_ld_wakeup_0_valid,
input [5:0] io_spec_ld_wakeup_0_bits,
input io_in_uop_valid,
input [6:0] io_in_uop_bits_uopc,
input [31:0] io_in_uop_bits_inst,
input [31:0] io_in_uop_bits_debug_inst,
input io_in_uop_bits_is_rvc,
input [39:0] io_in_uop_bits_debug_pc,
input [2:0] io_in_uop_bits_iq_type,
input [9:0] io_in_uop_bits_fu_code,
input [1:0] io_in_uop_bits_iw_state,
input io_in_uop_bits_iw_p1_poisoned,
input io_in_uop_bits_iw_p2_poisoned,
input io_in_uop_bits_is_br,
input io_in_uop_bits_is_jalr,
input io_in_uop_bits_is_jal,
input io_in_uop_bits_is_sfb,
input [7:0] io_in_uop_bits_br_mask,
input [2:0] io_in_uop_bits_br_tag,
input [3:0] io_in_uop_bits_ftq_idx,
input io_in_uop_bits_edge_inst,
input [5:0] io_in_uop_bits_pc_lob,
input io_in_uop_bits_taken,
input [19:0] io_in_uop_bits_imm_packed,
input [11:0] io_in_uop_bits_csr_addr,
input [4:0] io_in_uop_bits_rob_idx,
input [2:0] io_in_uop_bits_ldq_idx,
input [2:0] io_in_uop_bits_stq_idx,
input [1:0] io_in_uop_bits_rxq_idx,
input [5:0] io_in_uop_bits_pdst,
input [5:0] io_in_uop_bits_prs1,
input [5:0] io_in_uop_bits_prs2,
input [5:0] io_in_uop_bits_prs3,
input [3:0] io_in_uop_bits_ppred,
input io_in_uop_bits_prs1_busy,
input io_in_uop_bits_prs2_busy,
input io_in_uop_bits_prs3_busy,
input io_in_uop_bits_ppred_busy,
input [5:0] io_in_uop_bits_stale_pdst,
input io_in_uop_bits_exception,
input [63:0] io_in_uop_bits_exc_cause,
input io_in_uop_bits_bypassable,
input [4:0] io_in_uop_bits_mem_cmd,
input [1:0] io_in_uop_bits_mem_size,
input io_in_uop_bits_mem_signed,
input io_in_uop_bits_is_fence,
input io_in_uop_bits_is_fencei,
input io_in_uop_bits_is_amo,
input io_in_uop_bits_uses_ldq,
input io_in_uop_bits_uses_stq,
input io_in_uop_bits_is_sys_pc2epc,
input io_in_uop_bits_is_unique,
input io_in_uop_bits_flush_on_commit,
input io_in_uop_bits_ldst_is_rs1,
input [5:0] io_in_uop_bits_ldst,
input [5:0] io_in_uop_bits_lrs1,
input [5:0] io_in_uop_bits_lrs2,
input [5:0] io_in_uop_bits_lrs3,
input io_in_uop_bits_ldst_val,
input [1:0] io_in_uop_bits_dst_rtype,
input [1:0] io_in_uop_bits_lrs1_rtype,
input [1:0] io_in_uop_bits_lrs2_rtype,
input io_in_uop_bits_frs3_en,
input io_in_uop_bits_fp_val,
input io_in_uop_bits_fp_single,
input io_in_uop_bits_xcpt_pf_if,
input io_in_uop_bits_xcpt_ae_if,
input io_in_uop_bits_xcpt_ma_if,
input io_in_uop_bits_bp_debug_if,
input io_in_uop_bits_bp_xcpt_if,
input [1:0] io_in_uop_bits_debug_fsrc,
input [1:0] io_in_uop_bits_debug_tsrc,
output [6:0] io_out_uop_uopc,
output [31:0] io_out_uop_inst,
output [31:0] io_out_uop_debug_inst,
output io_out_uop_is_rvc,
output [39:0] io_out_uop_debug_pc,
output [2:0] io_out_uop_iq_type,
output [9:0] io_out_uop_fu_code,
output [1:0] io_out_uop_iw_state,
output io_out_uop_iw_p1_poisoned,
output io_out_uop_iw_p2_poisoned,
output io_out_uop_is_br,
output io_out_uop_is_jalr,
output io_out_uop_is_jal,
output io_out_uop_is_sfb,
output [7:0] io_out_uop_br_mask,
output [2:0] io_out_uop_br_tag,
output [3:0] io_out_uop_ftq_idx,
output io_out_uop_edge_inst,
output [5:0] io_out_uop_pc_lob,
output io_out_uop_taken,
output [19:0] io_out_uop_imm_packed,
output [11:0] io_out_uop_csr_addr,
output [4:0] io_out_uop_rob_idx,
output [2:0] io_out_uop_ldq_idx,
output [2:0] io_out_uop_stq_idx,
output [1:0] io_out_uop_rxq_idx,
output [5:0] io_out_uop_pdst,
output [5:0] io_out_uop_prs1,
output [5:0] io_out_uop_prs2,
output [5:0] io_out_uop_prs3,
output [3:0] io_out_uop_ppred,
output io_out_uop_prs1_busy,
output io_out_uop_prs2_busy,
output io_out_uop_prs3_busy,
output io_out_uop_ppred_busy,
output [5:0] io_out_uop_stale_pdst,
output io_out_uop_exception,
output [63:0] io_out_uop_exc_cause,
output io_out_uop_bypassable,
output [4:0] io_out_uop_mem_cmd,
output [1:0] io_out_uop_mem_size,
output io_out_uop_mem_signed,
output io_out_uop_is_fence,
output io_out_uop_is_fencei,
output io_out_uop_is_amo,
output io_out_uop_uses_ldq,
output io_out_uop_uses_stq,
output io_out_uop_is_sys_pc2epc,
output io_out_uop_is_unique,
output io_out_uop_flush_on_commit,
output io_out_uop_ldst_is_rs1,
output [5:0] io_out_uop_ldst,
output [5:0] io_out_uop_lrs1,
output [5:0] io_out_uop_lrs2,
output [5:0] io_out_uop_lrs3,
output io_out_uop_ldst_val,
output [1:0] io_out_uop_dst_rtype,
output [1:0] io_out_uop_lrs1_rtype,
output [1:0] io_out_uop_lrs2_rtype,
output io_out_uop_frs3_en,
output io_out_uop_fp_val,
output io_out_uop_fp_single,
output io_out_uop_xcpt_pf_if,
output io_out_uop_xcpt_ae_if,
output io_out_uop_xcpt_ma_if,
output io_out_uop_bp_debug_if,
output io_out_uop_bp_xcpt_if,
output [1:0] io_out_uop_debug_fsrc,
output [1:0] io_out_uop_debug_tsrc,
output [6:0] io_uop_uopc,
output [31:0] io_uop_inst,
output [31:0] io_uop_debug_inst,
output io_uop_is_rvc,
output [39:0] io_uop_debug_pc,
output [2:0] io_uop_iq_type,
output [9:0] io_uop_fu_code,
output [1:0] io_uop_iw_state,
output io_uop_iw_p1_poisoned,
output io_uop_iw_p2_poisoned,
output io_uop_is_br,
output io_uop_is_jalr,
output io_uop_is_jal,
output io_uop_is_sfb,
output [7:0] io_uop_br_mask,
output [2:0] io_uop_br_tag,
output [3:0] io_uop_ftq_idx,
output io_uop_edge_inst,
output [5:0] io_uop_pc_lob,
output io_uop_taken,
output [19:0] io_uop_imm_packed,
output [11:0] io_uop_csr_addr,
output [4:0] io_uop_rob_idx,
output [2:0] io_uop_ldq_idx,
output [2:0] io_uop_stq_idx,
output [1:0] io_uop_rxq_idx,
output [5:0] io_uop_pdst,
output [5:0] io_uop_prs1,
output [5:0] io_uop_prs2,
output [5:0] io_uop_prs3,
output [3:0] io_uop_ppred,
output io_uop_prs1_busy,
output io_uop_prs2_busy,
output io_uop_prs3_busy,
output io_uop_ppred_busy,
output [5:0] io_uop_stale_pdst,
output io_uop_exception,
output [63:0] io_uop_exc_cause,
output io_uop_bypassable,
output [4:0] io_uop_mem_cmd,
output [1:0] io_uop_mem_size,
output io_uop_mem_signed,
output io_uop_is_fence,
output io_uop_is_fencei,
output io_uop_is_amo,
output io_uop_uses_ldq,
output io_uop_uses_stq,
output io_uop_is_sys_pc2epc,
output io_uop_is_unique,
output io_uop_flush_on_commit,
output io_uop_ldst_is_rs1,
output [5:0] io_uop_ldst,
output [5:0] io_uop_lrs1,
output [5:0] io_uop_lrs2,
output [5:0] io_uop_lrs3,
output io_uop_ldst_val,
output [1:0] io_uop_dst_rtype,
output [1:0] io_uop_lrs1_rtype,
output [1:0] io_uop_lrs2_rtype,
output io_uop_frs3_en,
output io_uop_fp_val,
output io_uop_fp_single,
output io_uop_xcpt_pf_if,
output io_uop_xcpt_ae_if,
output io_uop_xcpt_ma_if,
output io_uop_bp_debug_if,
output io_uop_bp_xcpt_if,
output [1:0] io_uop_debug_fsrc,
output [1:0] io_uop_debug_tsrc
);
reg [1:0] state;
reg p1;
reg p2;
reg p3;
reg ppred;
reg p1_poisoned;
reg p2_poisoned;
wire next_p1_poisoned = io_in_uop_valid ? io_in_uop_bits_iw_p1_poisoned : p1_poisoned;
wire next_p2_poisoned = io_in_uop_valid ? io_in_uop_bits_iw_p2_poisoned : p2_poisoned;
reg [6:0] slot_uop_uopc;
reg [31:0] slot_uop_inst;
reg [31:0] slot_uop_debug_inst;
reg slot_uop_is_rvc;
reg [39:0] slot_uop_debug_pc;
reg [2:0] slot_uop_iq_type;
reg [9:0] slot_uop_fu_code;
reg [1:0] slot_uop_iw_state;
reg slot_uop_is_br;
reg slot_uop_is_jalr;
reg slot_uop_is_jal;
reg slot_uop_is_sfb;
reg [7:0] slot_uop_br_mask;
reg [2:0] slot_uop_br_tag;
reg [3:0] slot_uop_ftq_idx;
reg slot_uop_edge_inst;
reg [5:0] slot_uop_pc_lob;
reg slot_uop_taken;
reg [19:0] slot_uop_imm_packed;
reg [11:0] slot_uop_csr_addr;
reg [4:0] slot_uop_rob_idx;
reg [2:0] slot_uop_ldq_idx;
reg [2:0] slot_uop_stq_idx;
reg [1:0] slot_uop_rxq_idx;
reg [5:0] slot_uop_pdst;
reg [5:0] slot_uop_prs1;
reg [5:0] slot_uop_prs2;
reg [5:0] slot_uop_prs3;
reg [3:0] slot_uop_ppred;
reg slot_uop_prs1_busy;
reg slot_uop_prs2_busy;
reg slot_uop_prs3_busy;
reg slot_uop_ppred_busy;
reg [5:0] slot_uop_stale_pdst;
reg slot_uop_exception;
reg [63:0] slot_uop_exc_cause;
reg slot_uop_bypassable;
reg [4:0] slot_uop_mem_cmd;
reg [1:0] slot_uop_mem_size;
reg slot_uop_mem_signed;
reg slot_uop_is_fence;
reg slot_uop_is_fencei;
reg slot_uop_is_amo;
reg slot_uop_uses_ldq;
reg slot_uop_uses_stq;
reg slot_uop_is_sys_pc2epc;
reg slot_uop_is_unique;
reg slot_uop_flush_on_commit;
reg slot_uop_ldst_is_rs1;
reg [5:0] slot_uop_ldst;
reg [5:0] slot_uop_lrs1;
reg [5:0] slot_uop_lrs2;
reg [5:0] slot_uop_lrs3;
reg slot_uop_ldst_val;
reg [1:0] slot_uop_dst_rtype;
reg [1:0] slot_uop_lrs1_rtype;
reg [1:0] slot_uop_lrs2_rtype;
reg slot_uop_frs3_en;
reg slot_uop_fp_val;
reg slot_uop_fp_single;
reg slot_uop_xcpt_pf_if;
reg slot_uop_xcpt_ae_if;
reg slot_uop_xcpt_ma_if;
reg slot_uop_bp_debug_if;
reg slot_uop_bp_xcpt_if;
reg [1:0] slot_uop_debug_fsrc;
reg [1:0] slot_uop_debug_tsrc;
wire [5:0] next_uop_prs1 = io_in_uop_valid ? io_in_uop_bits_prs1 : slot_uop_prs1;
wire [5:0] next_uop_prs2 = io_in_uop_valid ? io_in_uop_bits_prs2 : slot_uop_prs2;
wire _GEN = state == 2'h2;
wire _GEN_0 = io_grant & _GEN;
wire _GEN_1 = io_grant & state == 2'h1 | _GEN_0 & p1 & p2 & ppred;
wire _GEN_2 = io_ldspec_miss & (p1_poisoned | p2_poisoned);
wire _GEN_3 = _GEN_0 & ~_GEN_2;
wire _GEN_4 = io_kill | _GEN_1;
wire _GEN_5 = _GEN_4 | ~(_GEN_0 & ~_GEN_2 & p1);
wire _GEN_6 = _GEN_4 | ~_GEN_3 | p1;
wire _GEN_7 = io_ldspec_miss & next_p1_poisoned;
wire _GEN_8 = io_ldspec_miss & next_p2_poisoned;
wire _GEN_9 = io_spec_ld_wakeup_0_valid & io_spec_ld_wakeup_0_bits == next_uop_prs1 & (io_in_uop_valid ? io_in_uop_bits_lrs1_rtype : slot_uop_lrs1_rtype) == 2'h0;
wire _GEN_10 = io_spec_ld_wakeup_0_valid & io_spec_ld_wakeup_0_bits == next_uop_prs2 & (io_in_uop_valid ? io_in_uop_bits_lrs2_rtype : slot_uop_lrs2_rtype) == 2'h0;
wire [7:0] next_br_mask = slot_uop_br_mask & ~io_brupdate_b1_resolve_mask;
wire _GEN_11 = (|(io_brupdate_b1_mispredict_mask & slot_uop_br_mask)) | io_kill;
wire _may_vacate_T = state == 2'h1;
wire _may_vacate_T_1 = state == 2'h2;
wire _GEN_12 = p1 & p2 & ppred;
wire _GEN_13 = p1 & ppred;
wire _GEN_14 = ~_may_vacate_T_1 | _GEN_12 | _GEN_13 | ~(p2 & ppred);
wire [5:0] next_uop_prs3 = io_in_uop_valid ? io_in_uop_bits_prs3 : slot_uop_prs3;
always @(posedge clock) begin
if (reset) begin
state <= 2'h0;
p1 <= 1'h0;
p2 <= 1'h0;
p3 <= 1'h0;
ppred <= 1'h0;
p1_poisoned <= 1'h0;
p2_poisoned <= 1'h0;
slot_uop_uopc <= 7'h0;
slot_uop_pdst <= 6'h0;
slot_uop_bypassable <= 1'h0;
slot_uop_uses_ldq <= 1'h0;
slot_uop_uses_stq <= 1'h0;
slot_uop_dst_rtype <= 2'h2;
slot_uop_fp_val <= 1'h0;
end
else begin
if (io_kill)
state <= 2'h0;
else if (io_in_uop_valid)
state <= io_in_uop_bits_iw_state;
else if (io_clear | _GEN_11)
state <= 2'h0;
else if (_GEN_1) begin
if (~_GEN_2)
state <= 2'h0;
end
else if (_GEN_3)
state <= 2'h1;
p1 <= _GEN_9 | io_wakeup_ports_2_valid & io_wakeup_ports_2_bits_pdst == next_uop_prs1 | io_wakeup_ports_1_valid & io_wakeup_ports_1_bits_pdst == next_uop_prs1 | io_wakeup_ports_0_valid & io_wakeup_ports_0_bits_pdst == next_uop_prs1 | ~_GEN_7 & (io_in_uop_valid ? ~io_in_uop_bits_prs1_busy : p1);
p2 <= _GEN_10 | io_wakeup_ports_2_valid & io_wakeup_ports_2_bits_pdst == next_uop_prs2 | io_wakeup_ports_1_valid & io_wakeup_ports_1_bits_pdst == next_uop_prs2 | io_wakeup_ports_0_valid & io_wakeup_ports_0_bits_pdst == next_uop_prs2 | ~_GEN_8 & (io_in_uop_valid ? ~io_in_uop_bits_prs2_busy : p2);
p3 <= io_wakeup_ports_2_valid & io_wakeup_ports_2_bits_pdst == next_uop_prs3 | io_wakeup_ports_1_valid & io_wakeup_ports_1_bits_pdst == next_uop_prs3 | io_wakeup_ports_0_valid & io_wakeup_ports_0_bits_pdst == next_uop_prs3 | (io_in_uop_valid ? ~io_in_uop_bits_prs3_busy : p3);
if (io_in_uop_valid) begin
ppred <= ~io_in_uop_bits_ppred_busy;
slot_uop_uopc <= io_in_uop_bits_uopc;
slot_uop_pdst <= io_in_uop_bits_pdst;
slot_uop_bypassable <= io_in_uop_bits_bypassable;
slot_uop_uses_ldq <= io_in_uop_bits_uses_ldq;
slot_uop_uses_stq <= io_in_uop_bits_uses_stq;
slot_uop_dst_rtype <= io_in_uop_bits_dst_rtype;
slot_uop_fp_val <= io_in_uop_bits_fp_val;
end
else if (_GEN_5) begin
end
else
slot_uop_uopc <= 7'h3;
p1_poisoned <= _GEN_9;
p2_poisoned <= _GEN_10;
end
if (io_in_uop_valid) begin
slot_uop_inst <= io_in_uop_bits_inst;
slot_uop_debug_inst <= io_in_uop_bits_debug_inst;
slot_uop_is_rvc <= io_in_uop_bits_is_rvc;
slot_uop_debug_pc <= io_in_uop_bits_debug_pc;
slot_uop_iq_type <= io_in_uop_bits_iq_type;
slot_uop_fu_code <= io_in_uop_bits_fu_code;
slot_uop_iw_state <= io_in_uop_bits_iw_state;
slot_uop_is_br <= io_in_uop_bits_is_br;
slot_uop_is_jalr <= io_in_uop_bits_is_jalr;
slot_uop_is_jal <= io_in_uop_bits_is_jal;
slot_uop_is_sfb <= io_in_uop_bits_is_sfb;
slot_uop_br_tag <= io_in_uop_bits_br_tag;
slot_uop_ftq_idx <= io_in_uop_bits_ftq_idx;
slot_uop_edge_inst <= io_in_uop_bits_edge_inst;
slot_uop_pc_lob <= io_in_uop_bits_pc_lob;
slot_uop_taken <= io_in_uop_bits_taken;
slot_uop_imm_packed <= io_in_uop_bits_imm_packed;
slot_uop_csr_addr <= io_in_uop_bits_csr_addr;
slot_uop_rob_idx <= io_in_uop_bits_rob_idx;
slot_uop_ldq_idx <= io_in_uop_bits_ldq_idx;
slot_uop_stq_idx <= io_in_uop_bits_stq_idx;
slot_uop_rxq_idx <= io_in_uop_bits_rxq_idx;
slot_uop_prs1 <= io_in_uop_bits_prs1;
slot_uop_prs2 <= io_in_uop_bits_prs2;
slot_uop_prs3 <= io_in_uop_bits_prs3;
slot_uop_ppred <= io_in_uop_bits_ppred;
slot_uop_prs1_busy <= io_in_uop_bits_prs1_busy;
slot_uop_prs2_busy <= io_in_uop_bits_prs2_busy;
slot_uop_prs3_busy <= io_in_uop_bits_prs3_busy;
slot_uop_ppred_busy <= io_in_uop_bits_ppred_busy;
slot_uop_stale_pdst <= io_in_uop_bits_stale_pdst;
slot_uop_exception <= io_in_uop_bits_exception;
slot_uop_exc_cause <= io_in_uop_bits_exc_cause;
slot_uop_mem_cmd <= io_in_uop_bits_mem_cmd;
slot_uop_mem_size <= io_in_uop_bits_mem_size;
slot_uop_mem_signed <= io_in_uop_bits_mem_signed;
slot_uop_is_fence <= io_in_uop_bits_is_fence;
slot_uop_is_fencei <= io_in_uop_bits_is_fencei;
slot_uop_is_amo <= io_in_uop_bits_is_amo;
slot_uop_is_sys_pc2epc <= io_in_uop_bits_is_sys_pc2epc;
slot_uop_is_unique <= io_in_uop_bits_is_unique;
slot_uop_flush_on_commit <= io_in_uop_bits_flush_on_commit;
slot_uop_ldst_is_rs1 <= io_in_uop_bits_ldst_is_rs1;
slot_uop_ldst <= io_in_uop_bits_ldst;
slot_uop_lrs1 <= io_in_uop_bits_lrs1;
slot_uop_lrs2 <= io_in_uop_bits_lrs2;
slot_uop_lrs3 <= io_in_uop_bits_lrs3;
slot_uop_ldst_val <= io_in_uop_bits_ldst_val;
slot_uop_lrs1_rtype <= io_in_uop_bits_lrs1_rtype;
slot_uop_lrs2_rtype <= io_in_uop_bits_lrs2_rtype;
slot_uop_frs3_en <= io_in_uop_bits_frs3_en;
slot_uop_fp_single <= io_in_uop_bits_fp_single;
slot_uop_xcpt_pf_if <= io_in_uop_bits_xcpt_pf_if;
slot_uop_xcpt_ae_if <= io_in_uop_bits_xcpt_ae_if;
slot_uop_xcpt_ma_if <= io_in_uop_bits_xcpt_ma_if;
slot_uop_bp_debug_if <= io_in_uop_bits_bp_debug_if;
slot_uop_bp_xcpt_if <= io_in_uop_bits_bp_xcpt_if;
slot_uop_debug_fsrc <= io_in_uop_bits_debug_fsrc;
slot_uop_debug_tsrc <= io_in_uop_bits_debug_tsrc;
end
else begin
if (_GEN_5) begin
end
else
slot_uop_lrs1_rtype <= 2'h2;
if (_GEN_6) begin
end
else
slot_uop_lrs2_rtype <= 2'h2;
end
slot_uop_br_mask <= io_in_uop_valid ? io_in_uop_bits_br_mask : next_br_mask;
end
assign io_valid = |state;
assign io_will_be_valid = (|state) & ~(io_grant & (_may_vacate_T | _may_vacate_T_1 & p1 & p2 & ppred) & ~(io_ldspec_miss & (p1_poisoned | p2_poisoned)));
assign io_request = _may_vacate_T ? p1 & p2 & p3 & ppred & ~io_kill : _GEN & (p1 | p2) & ppred & ~io_kill;
assign io_out_uop_uopc = _GEN_5 ? slot_uop_uopc : 7'h3;
assign io_out_uop_inst = slot_uop_inst;
assign io_out_uop_debug_inst = slot_uop_debug_inst;
assign io_out_uop_is_rvc = slot_uop_is_rvc;
assign io_out_uop_debug_pc = slot_uop_debug_pc;
assign io_out_uop_iq_type = slot_uop_iq_type;
assign io_out_uop_fu_code = slot_uop_fu_code;
assign io_out_uop_iw_state = _GEN_11 ? 2'h0 : _GEN_1 ? (_GEN_2 ? state : 2'h0) : _GEN_3 ? 2'h1 : state;
assign io_out_uop_iw_p1_poisoned = p1_poisoned;
assign io_out_uop_iw_p2_poisoned = p2_poisoned;
assign io_out_uop_is_br = slot_uop_is_br;
assign io_out_uop_is_jalr = slot_uop_is_jalr;
assign io_out_uop_is_jal = slot_uop_is_jal;
assign io_out_uop_is_sfb = slot_uop_is_sfb;
assign io_out_uop_br_mask = next_br_mask;
assign io_out_uop_br_tag = slot_uop_br_tag;
assign io_out_uop_ftq_idx = slot_uop_ftq_idx;
assign io_out_uop_edge_inst = slot_uop_edge_inst;
assign io_out_uop_pc_lob = slot_uop_pc_lob;
assign io_out_uop_taken = slot_uop_taken;
assign io_out_uop_imm_packed = slot_uop_imm_packed;
assign io_out_uop_csr_addr = slot_uop_csr_addr;
assign io_out_uop_rob_idx = slot_uop_rob_idx;
assign io_out_uop_ldq_idx = slot_uop_ldq_idx;
assign io_out_uop_stq_idx = slot_uop_stq_idx;
assign io_out_uop_rxq_idx = slot_uop_rxq_idx;
assign io_out_uop_pdst = slot_uop_pdst;
assign io_out_uop_prs1 = slot_uop_prs1;
assign io_out_uop_prs2 = slot_uop_prs2;
assign io_out_uop_prs3 = slot_uop_prs3;
assign io_out_uop_ppred = slot_uop_ppred;
assign io_out_uop_prs1_busy = ~p1;
assign io_out_uop_prs2_busy = ~p2;
assign io_out_uop_prs3_busy = ~p3;
assign io_out_uop_ppred_busy = ~ppred;
assign io_out_uop_stale_pdst = slot_uop_stale_pdst;
assign io_out_uop_exception = slot_uop_exception;
assign io_out_uop_exc_cause = slot_uop_exc_cause;
assign io_out_uop_bypassable = slot_uop_bypassable;
assign io_out_uop_mem_cmd = slot_uop_mem_cmd;
assign io_out_uop_mem_size = slot_uop_mem_size;
assign io_out_uop_mem_signed = slot_uop_mem_signed;
assign io_out_uop_is_fence = slot_uop_is_fence;
assign io_out_uop_is_fencei = slot_uop_is_fencei;
assign io_out_uop_is_amo = slot_uop_is_amo;
assign io_out_uop_uses_ldq = slot_uop_uses_ldq;
assign io_out_uop_uses_stq = slot_uop_uses_stq;
assign io_out_uop_is_sys_pc2epc = slot_uop_is_sys_pc2epc;
assign io_out_uop_is_unique = slot_uop_is_unique;
assign io_out_uop_flush_on_commit = slot_uop_flush_on_commit;
assign io_out_uop_ldst_is_rs1 = slot_uop_ldst_is_rs1;
assign io_out_uop_ldst = slot_uop_ldst;
assign io_out_uop_lrs1 = slot_uop_lrs1;
assign io_out_uop_lrs2 = slot_uop_lrs2;
assign io_out_uop_lrs3 = slot_uop_lrs3;
assign io_out_uop_ldst_val = slot_uop_ldst_val;
assign io_out_uop_dst_rtype = slot_uop_dst_rtype;
assign io_out_uop_lrs1_rtype = _GEN_5 ? slot_uop_lrs1_rtype : 2'h2;
assign io_out_uop_lrs2_rtype = _GEN_6 ? slot_uop_lrs2_rtype : 2'h2;
assign io_out_uop_frs3_en = slot_uop_frs3_en;
assign io_out_uop_fp_val = slot_uop_fp_val;
assign io_out_uop_fp_single = slot_uop_fp_single;
assign io_out_uop_xcpt_pf_if = slot_uop_xcpt_pf_if;
assign io_out_uop_xcpt_ae_if = slot_uop_xcpt_ae_if;
assign io_out_uop_xcpt_ma_if = slot_uop_xcpt_ma_if;
assign io_out_uop_bp_debug_if = slot_uop_bp_debug_if;
assign io_out_uop_bp_xcpt_if = slot_uop_bp_xcpt_if;
assign io_out_uop_debug_fsrc = slot_uop_debug_fsrc;
assign io_out_uop_debug_tsrc = slot_uop_debug_tsrc;
assign io_uop_uopc = _GEN_14 ? slot_uop_uopc : 7'h3;
assign io_uop_inst = slot_uop_inst;
assign io_uop_debug_inst = slot_uop_debug_inst;
assign io_uop_is_rvc = slot_uop_is_rvc;
assign io_uop_debug_pc = slot_uop_debug_pc;
assign io_uop_iq_type = slot_uop_iq_type;
assign io_uop_fu_code = slot_uop_fu_code;
assign io_uop_iw_state = slot_uop_iw_state;
assign io_uop_iw_p1_poisoned = p1_poisoned;
assign io_uop_iw_p2_poisoned = p2_poisoned;
assign io_uop_is_br = slot_uop_is_br;
assign io_uop_is_jalr = slot_uop_is_jalr;
assign io_uop_is_jal = slot_uop_is_jal;
assign io_uop_is_sfb = slot_uop_is_sfb;
assign io_uop_br_mask = slot_uop_br_mask;
assign io_uop_br_tag = slot_uop_br_tag;
assign io_uop_ftq_idx = slot_uop_ftq_idx;
assign io_uop_edge_inst = slot_uop_edge_inst;
assign io_uop_pc_lob = slot_uop_pc_lob;
assign io_uop_taken = slot_uop_taken;
assign io_uop_imm_packed = slot_uop_imm_packed;
assign io_uop_csr_addr = slot_uop_csr_addr;
assign io_uop_rob_idx = slot_uop_rob_idx;
assign io_uop_ldq_idx = slot_uop_ldq_idx;
assign io_uop_stq_idx = slot_uop_stq_idx;
assign io_uop_rxq_idx = slot_uop_rxq_idx;
assign io_uop_pdst = slot_uop_pdst;
assign io_uop_prs1 = slot_uop_prs1;
assign io_uop_prs2 = slot_uop_prs2;
assign io_uop_prs3 = slot_uop_prs3;
assign io_uop_ppred = slot_uop_ppred;
assign io_uop_prs1_busy = slot_uop_prs1_busy;
assign io_uop_prs2_busy = slot_uop_prs2_busy;
assign io_uop_prs3_busy = slot_uop_prs3_busy;
assign io_uop_ppred_busy = slot_uop_ppred_busy;
assign io_uop_stale_pdst = slot_uop_stale_pdst;
assign io_uop_exception = slot_uop_exception;
assign io_uop_exc_cause = slot_uop_exc_cause;
assign io_uop_bypassable = slot_uop_bypassable;
assign io_uop_mem_cmd = slot_uop_mem_cmd;
assign io_uop_mem_size = slot_uop_mem_size;
assign io_uop_mem_signed = slot_uop_mem_signed;
assign io_uop_is_fence = slot_uop_is_fence;
assign io_uop_is_fencei = slot_uop_is_fencei;
assign io_uop_is_amo = slot_uop_is_amo;
assign io_uop_uses_ldq = slot_uop_uses_ldq;
assign io_uop_uses_stq = slot_uop_uses_stq;
assign io_uop_is_sys_pc2epc = slot_uop_is_sys_pc2epc;
assign io_uop_is_unique = slot_uop_is_unique;
assign io_uop_flush_on_commit = slot_uop_flush_on_commit;
assign io_uop_ldst_is_rs1 = slot_uop_ldst_is_rs1;
assign io_uop_ldst = slot_uop_ldst;
assign io_uop_lrs1 = slot_uop_lrs1;
assign io_uop_lrs2 = slot_uop_lrs2;
assign io_uop_lrs3 = slot_uop_lrs3;
assign io_uop_ldst_val = slot_uop_ldst_val;
assign io_uop_dst_rtype = slot_uop_dst_rtype;
assign io_uop_lrs1_rtype = _GEN_14 ? slot_uop_lrs1_rtype : 2'h2;
assign io_uop_lrs2_rtype = ~_may_vacate_T_1 | _GEN_12 | ~_GEN_13 ? slot_uop_lrs2_rtype : 2'h2;
assign io_uop_frs3_en = slot_uop_frs3_en;
assign io_uop_fp_val = slot_uop_fp_val;
assign io_uop_fp_single = slot_uop_fp_single;
assign io_uop_xcpt_pf_if = slot_uop_xcpt_pf_if;
assign io_uop_xcpt_ae_if = slot_uop_xcpt_ae_if;
assign io_uop_xcpt_ma_if = slot_uop_xcpt_ma_if;
assign io_uop_bp_debug_if = slot_uop_bp_debug_if;
assign io_uop_bp_xcpt_if = slot_uop_bp_xcpt_if;
assign io_uop_debug_fsrc = slot_uop_debug_fsrc;
assign io_uop_debug_tsrc = slot_uop_debug_tsrc;
endmodule |
Generate the Verilog code corresponding to this Chisel code //******************************************************************************
// Copyright (c) 2013 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Execution Units
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// The issue window schedules micro-ops onto a specific execution pipeline
// A given execution pipeline may contain multiple functional units; one or more
// read ports, and one or more writeports.
package boom.v3.exu
import scala.collection.mutable.{ArrayBuffer}
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.rocket.{BP}
import freechips.rocketchip.tile
import FUConstants._
import boom.v3.common._
import boom.v3.ifu.{GetPCFromFtqIO}
import boom.v3.util.{ImmGen, IsKilledByBranch, BranchKillableQueue, BoomCoreStringPrefix}
/**
* Response from Execution Unit. Bundles a MicroOp with data
*
* @param dataWidth width of the data coming from the execution unit
*/
// Response payload produced by an execution unit: the result data plus the
// originating micro-op (mixed in via HasBoomUOP). Field order is part of the
// elaborated hardware layout — do not reorder.
class ExeUnitResp(val dataWidth: Int)(implicit p: Parameters) extends BoomBundle
  with HasBoomUOP
{
  val data = Bits(dataWidth.W)                 // result value (dataWidth chosen by the exe unit)
  val predicated = Bool() // Was this predicated off?
  val fflags = new ValidIO(new FFlagsResp) // write fflags to ROB // TODO: Do this better
}
/**
* Floating Point flag response
*/
// Floating-point exception-flag update sent to the ROB, tagged with the
// micro-op that produced it.
class FFlagsResp(implicit p: Parameters) extends BoomBundle
{
  val uop = new MicroOp()                       // micro-op the flags belong to (identifies the ROB entry)
  val flags = Bits(tile.FPConstants.FLAGS_SZ.W) // IEEE-754 exception flag bits (width from rocket-chip's FPConstants)
}
/**
* Abstract Top level Execution Unit that wraps lower level functional units to make a
* multi function execution unit.
*
* @param readsIrf does this exe unit need a integer regfile port
* @param writesIrf does this exe unit need a integer regfile port
* @param readsFrf does this exe unit need a integer regfile port
* @param writesFrf does this exe unit need a integer regfile port
* @param writesLlIrf does this exe unit need a integer regfile port
* @param writesLlFrf does this exe unit need a integer regfile port
* @param numBypassStages number of bypass ports for the exe unit
* @param dataWidth width of the data coming out of the exe unit
* @param bypassable is the exe unit able to be bypassed
* @param hasMem does the exe unit have a MemAddrCalcUnit
* @param hasCSR does the exe unit write to the CSRFile
* @param hasBrUnit does the exe unit have a branch unit
* @param hasAlu does the exe unit have a alu
* @param hasFpu does the exe unit have a fpu
* @param hasMul does the exe unit have a multiplier
* @param hasDiv does the exe unit have a divider
* @param hasFdiv does the exe unit have a FP divider
* @param hasIfpu does the exe unit have a int to FP unit
* @param hasFpiu does the exe unit have a FP to int unit
*/
// Base class for all execution units. Declares the shared IO bundle and
// drives every optional response port to an idle default; concrete
// subclasses (ALUExeUnit, FPUExeUnit) instantiate the functional units and
// override these defaults. Optional IO ports are conditionally elaborated:
// a port exists (is non-null) only when the corresponding constructor flag
// is set, so subclasses must only touch ports their flags enable.
abstract class ExecutionUnit(
  val readsIrf : Boolean = false,
  val writesIrf : Boolean = false,
  val readsFrf : Boolean = false,
  val writesFrf : Boolean = false,
  val writesLlIrf : Boolean = false, // long-latency writeback port into the integer RF
  val writesLlFrf : Boolean = false, // long-latency writeback port into the FP RF
  val numBypassStages : Int,
  val dataWidth : Int,
  val bypassable : Boolean = false, // TODO make override def for code clarity
  val alwaysBypassable : Boolean = false,
  val hasMem : Boolean = false,
  val hasCSR : Boolean = false,
  val hasJmpUnit : Boolean = false,
  val hasAlu : Boolean = false,
  val hasFpu : Boolean = false,
  val hasMul : Boolean = false,
  val hasDiv : Boolean = false,
  val hasFdiv : Boolean = false,
  val hasIfpu : Boolean = false,
  val hasFpiu : Boolean = false,
  val hasRocc : Boolean = false
  )(implicit p: Parameters) extends BoomModule
{
  val io = IO(new Bundle {
    // Bitmask of functional-unit types this unit can currently accept
    // (cleared by subclasses while a long-latency unit is busy).
    val fu_types = Output(Bits(FUC_SZ.W))
    val req = Flipped(new DecoupledIO(new FuncUnitReq(dataWidth)))
    // Response ports — only elaborated when the matching writes* flag is set.
    val iresp = if (writesIrf) new DecoupledIO(new ExeUnitResp(dataWidth)) else null
    val fresp = if (writesFrf) new DecoupledIO(new ExeUnitResp(dataWidth)) else null
    val ll_iresp = if (writesLlIrf) new DecoupledIO(new ExeUnitResp(dataWidth)) else null
    val ll_fresp = if (writesLlFrf) new DecoupledIO(new ExeUnitResp(dataWidth)) else null
    val bypass = Output(Vec(numBypassStages, Valid(new ExeUnitResp(dataWidth))))
    val brupdate = Input(new BrUpdateInfo())
    // only used by the rocc unit
    val rocc = if (hasRocc) new RoCCShimCoreIO else null
    // only used by the branch unit
    val brinfo = if (hasAlu) Output(new BrResolutionInfo()) else null
    val get_ftq_pc = if (hasJmpUnit) Flipped(new GetPCFromFtqIO()) else null
    val status = Input(new freechips.rocketchip.rocket.MStatus())
    // only used by the fpu unit
    val fcsr_rm = if (hasFcsr) Input(Bits(tile.FPConstants.RM_SZ.W)) else null
    // only used by the mem unit
    val lsu_io = if (hasMem) Flipped(new boom.v3.lsu.LSUExeIO) else null
    val bp = if (hasMem) Input(Vec(nBreakpoints, new BP)) else null
    val mcontext = if (hasMem) Input(UInt(coreParams.mcontextWidth.W)) else null
    val scontext = if (hasMem) Input(UInt(coreParams.scontextWidth.W)) else null
    // TODO move this out of ExecutionUnit
    val com_exception = if (hasMem || hasRocc) Input(Bool()) else null
  })

  // Idle defaults — subclasses conditionally override these connections.
  io.req.ready := false.B

  if (writesIrf) {
    io.iresp.valid := false.B
    io.iresp.bits := DontCare
    io.iresp.bits.fflags.valid := false.B
    io.iresp.bits.predicated := false.B
    // Fast integer writeback has a dedicated RF port and must never backpressure.
    assert(io.iresp.ready)
  }
  if (writesLlIrf) {
    io.ll_iresp.valid := false.B
    io.ll_iresp.bits := DontCare
    io.ll_iresp.bits.fflags.valid := false.B
    io.ll_iresp.bits.predicated := false.B
  }
  if (writesFrf) {
    io.fresp.valid := false.B
    io.fresp.bits := DontCare
    io.fresp.bits.fflags.valid := false.B
    io.fresp.bits.predicated := false.B
    // Fast FP writeback likewise must always be accepted.
    assert(io.fresp.ready)
  }
  if (writesLlFrf) {
    io.ll_fresp.valid := false.B
    io.ll_fresp.bits := DontCare
    io.ll_fresp.bits.fflags.valid := false.B
    io.ll_fresp.bits.predicated := false.B
  }

  // TODO add "number of fflag ports", so we can properly account for FPU+Mem combinations
  def hasFFlags : Boolean = hasFpu || hasFdiv

  // FP and integer functional units may not be mixed in one exe unit
  // (XOR: exactly one of the two families must be present).
  require ((hasFpu || hasFdiv) ^ (hasAlu || hasMul || hasMem || hasIfpu),
    "[execute] we no longer support mixing FP and Integer functional units in the same exe unit.")
  def hasFcsr = hasIfpu || hasFpu || hasFdiv

  require (bypassable || !alwaysBypassable,
    "[execute] an execution unit must be bypassable if it is always bypassable")

  // Static description of which FU types this unit implements (used by the issue logic).
  def supportedFuncUnits = {
    new SupportedFuncUnits(
      alu = hasAlu,
      jmp = hasJmpUnit,
      mem = hasMem,
      muld = hasMul || hasDiv,
      fpu = hasFpu,
      csr = hasCSR,
      fdiv = hasFdiv,
      ifpu = hasIfpu)
  }
}
/**
* ALU execution unit that can have a branch, alu, mul, div, int to FP,
* and memory unit.
*
* @param hasBrUnit does the exe unit have a branch unit
* @param hasCSR does the exe unit write to the CSRFile
* @param hasAlu does the exe unit have a alu
* @param hasMul does the exe unit have a multiplier
* @param hasDiv does the exe unit have a divider
* @param hasIfpu does the exe unit have a int to FP unit
* @param hasMem does the exe unit have a MemAddrCalcUnit
*/
// Integer-side execution unit. Optionally hosts an ALU (with embedded branch
// unit), pipelined multiplier, iterative divider, int-to-FP converter, RoCC
// shim, and/or the memory address-calculation unit. Fast single-cycle units
// share the iresp write port; long-latency results (mem, RoCC, ifpu) use the
// ll_* ports.
class ALUExeUnit(
  hasJmpUnit : Boolean = false,
  hasCSR : Boolean = false,
  hasAlu : Boolean = true,
  hasMul : Boolean = false,
  hasDiv : Boolean = false,
  hasIfpu : Boolean = false,
  hasMem : Boolean = false,
  hasRocc : Boolean = false)
  (implicit p: Parameters)
  extends ExecutionUnit(
    readsIrf = true,
    writesIrf = hasAlu || hasMul || hasDiv,
    writesLlIrf = hasMem || hasRocc,
    writesLlFrf = (hasIfpu || hasMem) && p(tile.TileKey).core.fpu != None,
    numBypassStages =
      if (hasAlu && hasMul) 3 //TODO XXX p(tile.TileKey).core.imulLatency
      else if (hasAlu) 1 else 0,
    dataWidth = 64 + 1, // xLen plus one tag bit (FP recoded-format convention)
    bypassable = hasAlu,
    alwaysBypassable = hasAlu && !(hasMem || hasJmpUnit || hasMul || hasDiv || hasCSR || hasIfpu || hasRocc),
    hasCSR = hasCSR,
    hasJmpUnit = hasJmpUnit,
    hasAlu = hasAlu,
    hasMul = hasMul,
    hasDiv = hasDiv,
    hasIfpu = hasIfpu,
    hasMem = hasMem,
    hasRocc = hasRocc)
  with freechips.rocketchip.rocket.constants.MemoryOpConstants
{
  // Legal-combination checks for the optional sub-units.
  require(!(hasRocc && !hasCSR),
    "RoCC needs to be shared with CSR unit")
  require(!(hasMem && hasRocc),
    "We do not support execution unit with both Mem and Rocc writebacks")
  require(!(hasMem && hasIfpu),
    "TODO. Currently do not support AluMemExeUnit with FP")

  val out_str =
    BoomCoreStringPrefix("==ExeUnit==") +
    (if (hasAlu) BoomCoreStringPrefix(" - ALU") else "") +
    (if (hasMul) BoomCoreStringPrefix(" - Mul") else "") +
    (if (hasDiv) BoomCoreStringPrefix(" - Div") else "") +
    (if (hasIfpu) BoomCoreStringPrefix(" - IFPU") else "") +
    (if (hasMem) BoomCoreStringPrefix(" - Mem") else "") +
    (if (hasRocc) BoomCoreStringPrefix(" - RoCC") else "")

  override def toString: String = out_str.toString

  // Busy flags for the iterative/queued units; they mask the corresponding
  // bits out of fu_types so the issue logic stops scheduling onto them.
  val div_busy = WireInit(false.B)
  val ifpu_busy = WireInit(false.B)

  // The Functional Units --------------------
  // Specifically the functional units with fast writeback to IRF
  val iresp_fu_units = ArrayBuffer[FunctionalUnit]()

  io.fu_types := Mux(hasAlu.B, FU_ALU, 0.U) |
                 Mux(hasMul.B, FU_MUL, 0.U) |
                 Mux(!div_busy && hasDiv.B, FU_DIV, 0.U) |
                 Mux(hasCSR.B, FU_CSR, 0.U) |
                 Mux(hasJmpUnit.B, FU_JMP, 0.U) |
                 Mux(!ifpu_busy && hasIfpu.B, FU_I2F, 0.U) |
                 Mux(hasMem.B, FU_MEM, 0.U)

  // ALU Unit -------------------------------
  var alu: ALUUnit = null
  if (hasAlu) {
    alu = Module(new ALUUnit(isJmpUnit = hasJmpUnit,
                             numStages = numBypassStages,
                             dataWidth = xLen))
    alu.io.req.valid := (
      io.req.valid &&
      (io.req.bits.uop.fu_code === FU_ALU ||
       io.req.bits.uop.fu_code === FU_JMP ||
      (io.req.bits.uop.fu_code === FU_CSR && io.req.bits.uop.uopc =/= uopROCC)))
    //ROCC Rocc Commands are taken by the RoCC unit
    alu.io.req.bits.uop := io.req.bits.uop
    alu.io.req.bits.kill := io.req.bits.kill
    alu.io.req.bits.rs1_data := io.req.bits.rs1_data
    alu.io.req.bits.rs2_data := io.req.bits.rs2_data
    alu.io.req.bits.rs3_data := DontCare
    alu.io.req.bits.pred_data := io.req.bits.pred_data
    alu.io.resp.ready := DontCare
    alu.io.brupdate := io.brupdate
    iresp_fu_units += alu
    // Bypassing only applies to ALU
    io.bypass := alu.io.bypass
    // branch unit is embedded inside the ALU
    io.brinfo := alu.io.brinfo
    if (hasJmpUnit) {
      alu.io.get_ftq_pc <> io.get_ftq_pc
    }
  }

  // RoCC shim: forwards RoCC commands and writes responses back over ll_iresp.
  var rocc: RoCCShim = null
  if (hasRocc) {
    rocc = Module(new RoCCShim)
    rocc.io.req.valid := io.req.valid && io.req.bits.uop.uopc === uopROCC
    rocc.io.req.bits := DontCare
    rocc.io.req.bits.uop := io.req.bits.uop
    rocc.io.req.bits.kill := io.req.bits.kill
    rocc.io.req.bits.rs1_data := io.req.bits.rs1_data
    rocc.io.req.bits.rs2_data := io.req.bits.rs2_data
    rocc.io.brupdate := io.brupdate // We should assert on this somewhere
    rocc.io.status := io.status
    rocc.io.exception := io.com_exception
    io.rocc <> rocc.io.core
    rocc.io.resp.ready := io.ll_iresp.ready
    io.ll_iresp.valid := rocc.io.resp.valid
    io.ll_iresp.bits.uop := rocc.io.resp.bits.uop
    io.ll_iresp.bits.data := rocc.io.resp.bits.data
  }

  // Pipelined, IMul Unit ------------------
  var imul: PipelinedMulUnit = null
  if (hasMul) {
    imul = Module(new PipelinedMulUnit(imulLatency, xLen))
    imul.io <> DontCare
    imul.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_MUL)
    imul.io.req.bits.uop := io.req.bits.uop
    imul.io.req.bits.rs1_data := io.req.bits.rs1_data
    imul.io.req.bits.rs2_data := io.req.bits.rs2_data
    imul.io.req.bits.kill := io.req.bits.kill
    imul.io.brupdate := io.brupdate
    iresp_fu_units += imul
  }

  // Int-to-FP converter; results are queued because the FP RF write port is shared.
  var ifpu: IntToFPUnit = null
  if (hasIfpu) {
    ifpu = Module(new IntToFPUnit(latency=intToFpLatency))
    ifpu.io.req <> io.req
    ifpu.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_I2F)
    ifpu.io.fcsr_rm := io.fcsr_rm
    ifpu.io.brupdate <> io.brupdate
    ifpu.io.resp.ready := DontCare

    // buffer up results since we share write-port on integer regfile.
    val queue = Module(new BranchKillableQueue(new ExeUnitResp(dataWidth),
      entries = intToFpLatency + 3)) // TODO being overly conservative
    queue.io.enq.valid := ifpu.io.resp.valid
    queue.io.enq.bits.uop := ifpu.io.resp.bits.uop
    queue.io.enq.bits.data := ifpu.io.resp.bits.data
    queue.io.enq.bits.predicated := ifpu.io.resp.bits.predicated
    queue.io.enq.bits.fflags := ifpu.io.resp.bits.fflags
    queue.io.brupdate := io.brupdate
    queue.io.flush := io.req.bits.kill
    io.ll_fresp <> queue.io.deq
    ifpu_busy := !(queue.io.empty)
    assert (queue.io.enq.ready)
  }

  // Div/Rem Unit -----------------------
  var div: DivUnit = null
  val div_resp_val = WireInit(false.B)
  if (hasDiv) {
    div = Module(new DivUnit(xLen))
    div.io <> DontCare
    div.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_DIV) && hasDiv.B
    div.io.req.bits.uop := io.req.bits.uop
    div.io.req.bits.rs1_data := io.req.bits.rs1_data
    div.io.req.bits.rs2_data := io.req.bits.rs2_data
    div.io.brupdate := io.brupdate
    div.io.req.bits.kill := io.req.bits.kill

    // share write port with the pipelined units
    div.io.resp.ready := !(iresp_fu_units.map(_.io.resp.valid).reduce(_|_))

    div_resp_val := div.io.resp.valid
    div_busy := !div.io.req.ready ||
                (io.req.valid && io.req.bits.uop.fu_code_is(FU_DIV))

    iresp_fu_units += div
  }

  // Mem Unit --------------------------
  if (hasMem) {
    require(!hasAlu)
    val maddrcalc = Module(new MemAddrCalcUnit)
    maddrcalc.io.req <> io.req
    maddrcalc.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_MEM)
    maddrcalc.io.brupdate <> io.brupdate
    maddrcalc.io.status := io.status
    maddrcalc.io.bp := io.bp
    maddrcalc.io.mcontext := io.mcontext
    maddrcalc.io.scontext := io.scontext
    maddrcalc.io.resp.ready := DontCare
    require(numBypassStages == 0)

    // LSU owns the actual memory access; we only compute addresses here.
    io.lsu_io.req := maddrcalc.io.resp

    io.ll_iresp <> io.lsu_io.iresp
    if (usingFPU) {
      io.ll_fresp <> io.lsu_io.fresp
    }
  }

  // Outputs (Write Port #0) ---------------
  // Priority-mux among the fast functional units sharing the iresp port.
  if (writesIrf) {
    io.iresp.valid := iresp_fu_units.map(_.io.resp.valid).reduce(_|_)
    io.iresp.bits.uop := PriorityMux(iresp_fu_units.map(f =>
      (f.io.resp.valid, f.io.resp.bits.uop)).toSeq)
    io.iresp.bits.data := PriorityMux(iresp_fu_units.map(f =>
      (f.io.resp.valid, f.io.resp.bits.data)).toSeq)
    io.iresp.bits.predicated := PriorityMux(iresp_fu_units.map(f =>
      (f.io.resp.valid, f.io.resp.bits.predicated)).toSeq)

    // pulled out for critical path reasons
    // TODO: Does this make sense as part of the iresp bundle?
    if (hasAlu) {
      io.iresp.bits.uop.csr_addr := ImmGen(alu.io.resp.bits.uop.imm_packed, IS_I).asUInt
      io.iresp.bits.uop.ctrl.csr_cmd := alu.io.resp.bits.uop.ctrl.csr_cmd
    }
  }

  // The divider stalls its response when a pipelined unit responds, so at
  // most one extra responder (the divider itself) may coincide.
  assert ((PopCount(iresp_fu_units.map(_.io.resp.valid)) <= 1.U && !div_resp_val) ||
          (PopCount(iresp_fu_units.map(_.io.resp.valid)) <= 2.U && (div_resp_val)),
          "Multiple functional units are fighting over the write port.")
}
/**
* FPU-only unit, with optional second write-port for ToInt micro-ops.
*
* @param hasFpu does the exe unit have a fpu
* @param hasFdiv does the exe unit have a FP divider
* @param hasFpiu does the exe unit have a FP to int unit
*/
// FPU-side execution unit: FPU pipeline plus optional FDiv/FSqrt, with an
// optional second (long-latency integer) write port for FP-to-int micro-ops
// and FP store data.
class FPUExeUnit(
  hasFpu : Boolean = true,
  hasFdiv : Boolean = false,
  hasFpiu : Boolean = false
  )
  (implicit p: Parameters)
  extends ExecutionUnit(
    readsFrf = true,
    writesFrf = true,
    writesLlIrf = hasFpiu,
    writesIrf = false,
    numBypassStages = 0,
    dataWidth = p(tile.TileKey).core.fpu.get.fLen + 1, // fLen plus recoded-format tag bit
    bypassable = false,
    hasFpu = hasFpu,
    hasFdiv = hasFdiv,
    hasFpiu = hasFpiu) with tile.HasFPUParameters
{
  // FIX: the original was missing the '+' after BoomCoreStringPrefix("==ExeUnit==").
  // Scala's semicolon inference then split this into two statements, so the
  // FPU/FDiv/FPIU listing was a discarded expression and toString printed only
  // the bare prefix. Now matches the ALUExeUnit out_str construction.
  val out_str =
    BoomCoreStringPrefix("==ExeUnit==") +
    (if (hasFpu)  BoomCoreStringPrefix("- FPU (Latency: " + dfmaLatency + ")") else "") +
    (if (hasFdiv) BoomCoreStringPrefix("- FDiv/FSqrt") else "") +
    (if (hasFpiu) BoomCoreStringPrefix("- FPIU (writes to Integer RF)") else "")

  // Busy flags mask FU_FDV / FU_F2I out of fu_types while the corresponding
  // iterative unit or queue cannot accept more work.
  val fdiv_busy = WireInit(false.B)
  val fpiu_busy = WireInit(false.B)

  // The Functional Units --------------------
  val fu_units = ArrayBuffer[FunctionalUnit]()

  io.fu_types := Mux(hasFpu.B, FU_FPU, 0.U) |
                 Mux(!fdiv_busy && hasFdiv.B, FU_FDV, 0.U) |
                 Mux(!fpiu_busy && hasFpiu.B, FU_F2I, 0.U)

  // FPU Unit -----------------------
  var fpu: FPUUnit = null
  val fpu_resp_val = WireInit(false.B)
  val fpu_resp_fflags = Wire(new ValidIO(new FFlagsResp()))
  fpu_resp_fflags.valid := false.B
  if (hasFpu) {
    fpu = Module(new FPUUnit())
    fpu.io.req.valid := io.req.valid &&
                        (io.req.bits.uop.fu_code_is(FU_FPU) ||
                         io.req.bits.uop.fu_code_is(FU_F2I)) // TODO move to using a separate unit
    fpu.io.req.bits.uop := io.req.bits.uop
    fpu.io.req.bits.rs1_data := io.req.bits.rs1_data
    fpu.io.req.bits.rs2_data := io.req.bits.rs2_data
    fpu.io.req.bits.rs3_data := io.req.bits.rs3_data
    fpu.io.req.bits.pred_data := false.B
    fpu.io.req.bits.kill := io.req.bits.kill
    fpu.io.fcsr_rm := io.fcsr_rm
    fpu.io.brupdate := io.brupdate
    fpu.io.resp.ready := DontCare
    fpu_resp_val := fpu.io.resp.valid
    fpu_resp_fflags := fpu.io.resp.bits.fflags
    fu_units += fpu
  }

  // FDiv/FSqrt Unit -----------------------
  var fdivsqrt: FDivSqrtUnit = null
  val fdiv_resp_fflags = Wire(new ValidIO(new FFlagsResp()))
  fdiv_resp_fflags := DontCare
  fdiv_resp_fflags.valid := false.B
  if (hasFdiv) {
    fdivsqrt = Module(new FDivSqrtUnit())
    fdivsqrt.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_FDV)
    fdivsqrt.io.req.bits.uop := io.req.bits.uop
    fdivsqrt.io.req.bits.rs1_data := io.req.bits.rs1_data
    fdivsqrt.io.req.bits.rs2_data := io.req.bits.rs2_data
    fdivsqrt.io.req.bits.rs3_data := DontCare
    fdivsqrt.io.req.bits.pred_data := false.B
    fdivsqrt.io.req.bits.kill := io.req.bits.kill
    fdivsqrt.io.fcsr_rm := io.fcsr_rm
    fdivsqrt.io.brupdate := io.brupdate

    // share write port with the pipelined units
    fdivsqrt.io.resp.ready := !(fu_units.map(_.io.resp.valid).reduce(_|_)) // TODO PERF will get blocked by fpiu.

    fdiv_busy := !fdivsqrt.io.req.ready || (io.req.valid && io.req.bits.uop.fu_code_is(FU_FDV))

    fdiv_resp_fflags := fdivsqrt.io.resp.bits.fflags
    fu_units += fdivsqrt
  }

  // Outputs (Write Port #0) ---------------
  // FP results go to fresp; FU_F2I results are diverted to the queue below.
  // NOTE(review): `fpu` is referenced here unconditionally, so this class
  // presumes hasFpu — verify if an FDiv-only configuration is ever elaborated.
  io.fresp.valid := fu_units.map(_.io.resp.valid).reduce(_|_) &&
                    !(fpu.io.resp.valid && fpu.io.resp.bits.uop.fu_code_is(FU_F2I))
  io.fresp.bits.uop := PriorityMux(fu_units.map(f => (f.io.resp.valid,
    f.io.resp.bits.uop)).toSeq)
  io.fresp.bits.data:= PriorityMux(fu_units.map(f => (f.io.resp.valid, f.io.resp.bits.data)).toSeq)
  io.fresp.bits.fflags := Mux(fpu_resp_val, fpu_resp_fflags, fdiv_resp_fflags)

  // Outputs (Write Port #1) -- FpToInt Queuing Unit -----------------------
  if (hasFpiu) {
    // TODO instantiate our own fpiu; and remove it from fpu.scala.
    // buffer up results since we share write-port on integer regfile.
    val queue = Module(new BranchKillableQueue(new ExeUnitResp(dataWidth),
      entries = dfmaLatency + 3)) // TODO being overly conservative
    queue.io.enq.valid := (fpu.io.resp.valid &&
                           fpu.io.resp.bits.uop.fu_code_is(FU_F2I) &&
                           fpu.io.resp.bits.uop.uopc =/= uopSTA) // STA means store data gen for floating point
    queue.io.enq.bits.uop := fpu.io.resp.bits.uop
    queue.io.enq.bits.data := fpu.io.resp.bits.data
    queue.io.enq.bits.predicated := fpu.io.resp.bits.predicated
    queue.io.enq.bits.fflags := fpu.io.resp.bits.fflags
    queue.io.brupdate := io.brupdate
    queue.io.flush := io.req.bits.kill
    assert (queue.io.enq.ready) // If this backs up, we've miscalculated the size of the queue.

    // FP store data bypasses the FPU pipeline; small queue provides backpressure.
    val fp_sdq = Module(new BranchKillableQueue(new ExeUnitResp(dataWidth),
      entries = 3)) // Lets us backpressure floating point store data
    fp_sdq.io.enq.valid := io.req.valid && io.req.bits.uop.uopc === uopSTA && !IsKilledByBranch(io.brupdate, io.req.bits.uop)
    fp_sdq.io.enq.bits.uop := io.req.bits.uop
    fp_sdq.io.enq.bits.data := ieee(io.req.bits.rs2_data)
    fp_sdq.io.enq.bits.predicated := false.B
    fp_sdq.io.enq.bits.fflags := DontCare
    fp_sdq.io.brupdate := io.brupdate
    fp_sdq.io.flush := io.req.bits.kill

    assert(!(fp_sdq.io.enq.valid && !fp_sdq.io.enq.ready))

    // Arbitrate the shared long-latency integer write port between the two queues.
    val resp_arb = Module(new Arbiter(new ExeUnitResp(dataWidth), 2))
    resp_arb.io.in(0) <> queue.io.deq
    resp_arb.io.in(1) <> fp_sdq.io.deq
    io.ll_iresp <> resp_arb.io.out

    fpiu_busy := !(queue.io.empty && fp_sdq.io.empty)
  }

  override def toString: String = out_str.toString
} | module ALUExeUnit_1(
input clock,
input reset,
output [9:0] io_fu_types,
input io_req_valid,
input [6:0] io_req_bits_uop_uopc,
input [31:0] io_req_bits_uop_inst,
input [31:0] io_req_bits_uop_debug_inst,
input io_req_bits_uop_is_rvc,
input [39:0] io_req_bits_uop_debug_pc,
input [2:0] io_req_bits_uop_iq_type,
input [9:0] io_req_bits_uop_fu_code,
input [3:0] io_req_bits_uop_ctrl_br_type,
input [1:0] io_req_bits_uop_ctrl_op1_sel,
input [2:0] io_req_bits_uop_ctrl_op2_sel,
input [2:0] io_req_bits_uop_ctrl_imm_sel,
input [4:0] io_req_bits_uop_ctrl_op_fcn,
input io_req_bits_uop_ctrl_fcn_dw,
input [2:0] io_req_bits_uop_ctrl_csr_cmd,
input io_req_bits_uop_ctrl_is_load,
input io_req_bits_uop_ctrl_is_sta,
input io_req_bits_uop_ctrl_is_std,
input [1:0] io_req_bits_uop_iw_state,
input io_req_bits_uop_is_br,
input io_req_bits_uop_is_jalr,
input io_req_bits_uop_is_jal,
input io_req_bits_uop_is_sfb,
input [7:0] io_req_bits_uop_br_mask,
input [2:0] io_req_bits_uop_br_tag,
input [3:0] io_req_bits_uop_ftq_idx,
input io_req_bits_uop_edge_inst,
input [5:0] io_req_bits_uop_pc_lob,
input io_req_bits_uop_taken,
input [19:0] io_req_bits_uop_imm_packed,
input [11:0] io_req_bits_uop_csr_addr,
input [4:0] io_req_bits_uop_rob_idx,
input [2:0] io_req_bits_uop_ldq_idx,
input [2:0] io_req_bits_uop_stq_idx,
input [1:0] io_req_bits_uop_rxq_idx,
input [5:0] io_req_bits_uop_pdst,
input [5:0] io_req_bits_uop_prs1,
input [5:0] io_req_bits_uop_prs2,
input [5:0] io_req_bits_uop_prs3,
input [3:0] io_req_bits_uop_ppred,
input io_req_bits_uop_prs1_busy,
input io_req_bits_uop_prs2_busy,
input io_req_bits_uop_prs3_busy,
input io_req_bits_uop_ppred_busy,
input [5:0] io_req_bits_uop_stale_pdst,
input io_req_bits_uop_exception,
input [63:0] io_req_bits_uop_exc_cause,
input io_req_bits_uop_bypassable,
input [4:0] io_req_bits_uop_mem_cmd,
input [1:0] io_req_bits_uop_mem_size,
input io_req_bits_uop_mem_signed,
input io_req_bits_uop_is_fence,
input io_req_bits_uop_is_fencei,
input io_req_bits_uop_is_amo,
input io_req_bits_uop_uses_ldq,
input io_req_bits_uop_uses_stq,
input io_req_bits_uop_is_sys_pc2epc,
input io_req_bits_uop_is_unique,
input io_req_bits_uop_flush_on_commit,
input io_req_bits_uop_ldst_is_rs1,
input [5:0] io_req_bits_uop_ldst,
input [5:0] io_req_bits_uop_lrs1,
input [5:0] io_req_bits_uop_lrs2,
input [5:0] io_req_bits_uop_lrs3,
input io_req_bits_uop_ldst_val,
input [1:0] io_req_bits_uop_dst_rtype,
input [1:0] io_req_bits_uop_lrs1_rtype,
input [1:0] io_req_bits_uop_lrs2_rtype,
input io_req_bits_uop_frs3_en,
input io_req_bits_uop_fp_val,
input io_req_bits_uop_fp_single,
input io_req_bits_uop_xcpt_pf_if,
input io_req_bits_uop_xcpt_ae_if,
input io_req_bits_uop_xcpt_ma_if,
input io_req_bits_uop_bp_debug_if,
input io_req_bits_uop_bp_xcpt_if,
input [1:0] io_req_bits_uop_debug_fsrc,
input [1:0] io_req_bits_uop_debug_tsrc,
input [64:0] io_req_bits_rs1_data,
input [64:0] io_req_bits_rs2_data,
input io_req_bits_kill,
output io_iresp_valid,
output [2:0] io_iresp_bits_uop_ctrl_csr_cmd,
output [11:0] io_iresp_bits_uop_csr_addr,
output [4:0] io_iresp_bits_uop_rob_idx,
output [5:0] io_iresp_bits_uop_pdst,
output io_iresp_bits_uop_bypassable,
output io_iresp_bits_uop_is_amo,
output io_iresp_bits_uop_uses_stq,
output [1:0] io_iresp_bits_uop_dst_rtype,
output [64:0] io_iresp_bits_data,
input io_ll_fresp_ready,
output io_ll_fresp_valid,
output [6:0] io_ll_fresp_bits_uop_uopc,
output [7:0] io_ll_fresp_bits_uop_br_mask,
output [4:0] io_ll_fresp_bits_uop_rob_idx,
output [2:0] io_ll_fresp_bits_uop_stq_idx,
output [5:0] io_ll_fresp_bits_uop_pdst,
output io_ll_fresp_bits_uop_is_amo,
output io_ll_fresp_bits_uop_uses_stq,
output [1:0] io_ll_fresp_bits_uop_dst_rtype,
output io_ll_fresp_bits_uop_fp_val,
output [64:0] io_ll_fresp_bits_data,
output io_ll_fresp_bits_predicated,
output io_ll_fresp_bits_fflags_valid,
output [4:0] io_ll_fresp_bits_fflags_bits_uop_rob_idx,
output [4:0] io_ll_fresp_bits_fflags_bits_flags,
output io_bypass_0_valid,
output [5:0] io_bypass_0_bits_uop_pdst,
output [1:0] io_bypass_0_bits_uop_dst_rtype,
output [64:0] io_bypass_0_bits_data,
output io_bypass_1_valid,
output [5:0] io_bypass_1_bits_uop_pdst,
output [1:0] io_bypass_1_bits_uop_dst_rtype,
output [64:0] io_bypass_1_bits_data,
output io_bypass_2_valid,
output [5:0] io_bypass_2_bits_uop_pdst,
output [1:0] io_bypass_2_bits_uop_dst_rtype,
output [64:0] io_bypass_2_bits_data,
input [7:0] io_brupdate_b1_resolve_mask,
input [7:0] io_brupdate_b1_mispredict_mask,
output io_brinfo_uop_is_rvc,
output [7:0] io_brinfo_uop_br_mask,
output [2:0] io_brinfo_uop_br_tag,
output [3:0] io_brinfo_uop_ftq_idx,
output io_brinfo_uop_edge_inst,
output [5:0] io_brinfo_uop_pc_lob,
output [4:0] io_brinfo_uop_rob_idx,
output [2:0] io_brinfo_uop_ldq_idx,
output [2:0] io_brinfo_uop_stq_idx,
output io_brinfo_valid,
output io_brinfo_mispredict,
output io_brinfo_taken,
output [2:0] io_brinfo_cfi_type,
output [1:0] io_brinfo_pc_sel,
output [39:0] io_brinfo_jalr_target,
output [20:0] io_brinfo_target_offset,
input io_get_ftq_pc_entry_cfi_idx_valid,
input [1:0] io_get_ftq_pc_entry_cfi_idx_bits,
input io_get_ftq_pc_entry_start_bank,
input [39:0] io_get_ftq_pc_pc,
input io_get_ftq_pc_next_val,
input [39:0] io_get_ftq_pc_next_pc,
input [2:0] io_fcsr_rm
);
wire div_busy;
wire _DivUnit_io_req_ready;
wire _DivUnit_io_resp_valid;
wire [4:0] _DivUnit_io_resp_bits_uop_rob_idx;
wire [5:0] _DivUnit_io_resp_bits_uop_pdst;
wire _DivUnit_io_resp_bits_uop_bypassable;
wire _DivUnit_io_resp_bits_uop_is_amo;
wire _DivUnit_io_resp_bits_uop_uses_stq;
wire [1:0] _DivUnit_io_resp_bits_uop_dst_rtype;
wire [63:0] _DivUnit_io_resp_bits_data;
wire _queue_io_enq_ready;
wire _queue_io_empty;
wire _IntToFPUnit_io_resp_valid;
wire [6:0] _IntToFPUnit_io_resp_bits_uop_uopc;
wire [31:0] _IntToFPUnit_io_resp_bits_uop_inst;
wire [31:0] _IntToFPUnit_io_resp_bits_uop_debug_inst;
wire _IntToFPUnit_io_resp_bits_uop_is_rvc;
wire [39:0] _IntToFPUnit_io_resp_bits_uop_debug_pc;
wire [2:0] _IntToFPUnit_io_resp_bits_uop_iq_type;
wire [9:0] _IntToFPUnit_io_resp_bits_uop_fu_code;
wire [3:0] _IntToFPUnit_io_resp_bits_uop_ctrl_br_type;
wire [1:0] _IntToFPUnit_io_resp_bits_uop_ctrl_op1_sel;
wire [2:0] _IntToFPUnit_io_resp_bits_uop_ctrl_op2_sel;
wire [2:0] _IntToFPUnit_io_resp_bits_uop_ctrl_imm_sel;
wire [4:0] _IntToFPUnit_io_resp_bits_uop_ctrl_op_fcn;
wire _IntToFPUnit_io_resp_bits_uop_ctrl_fcn_dw;
wire [2:0] _IntToFPUnit_io_resp_bits_uop_ctrl_csr_cmd;
wire _IntToFPUnit_io_resp_bits_uop_ctrl_is_load;
wire _IntToFPUnit_io_resp_bits_uop_ctrl_is_sta;
wire _IntToFPUnit_io_resp_bits_uop_ctrl_is_std;
wire [1:0] _IntToFPUnit_io_resp_bits_uop_iw_state;
wire _IntToFPUnit_io_resp_bits_uop_iw_p1_poisoned;
wire _IntToFPUnit_io_resp_bits_uop_iw_p2_poisoned;
wire _IntToFPUnit_io_resp_bits_uop_is_br;
wire _IntToFPUnit_io_resp_bits_uop_is_jalr;
wire _IntToFPUnit_io_resp_bits_uop_is_jal;
wire _IntToFPUnit_io_resp_bits_uop_is_sfb;
wire [7:0] _IntToFPUnit_io_resp_bits_uop_br_mask;
wire [2:0] _IntToFPUnit_io_resp_bits_uop_br_tag;
wire [3:0] _IntToFPUnit_io_resp_bits_uop_ftq_idx;
wire _IntToFPUnit_io_resp_bits_uop_edge_inst;
wire [5:0] _IntToFPUnit_io_resp_bits_uop_pc_lob;
wire _IntToFPUnit_io_resp_bits_uop_taken;
wire [19:0] _IntToFPUnit_io_resp_bits_uop_imm_packed;
wire [11:0] _IntToFPUnit_io_resp_bits_uop_csr_addr;
wire [4:0] _IntToFPUnit_io_resp_bits_uop_rob_idx;
wire [2:0] _IntToFPUnit_io_resp_bits_uop_ldq_idx;
wire [2:0] _IntToFPUnit_io_resp_bits_uop_stq_idx;
wire [1:0] _IntToFPUnit_io_resp_bits_uop_rxq_idx;
wire [5:0] _IntToFPUnit_io_resp_bits_uop_pdst;
wire [5:0] _IntToFPUnit_io_resp_bits_uop_prs1;
wire [5:0] _IntToFPUnit_io_resp_bits_uop_prs2;
wire [5:0] _IntToFPUnit_io_resp_bits_uop_prs3;
wire [3:0] _IntToFPUnit_io_resp_bits_uop_ppred;
wire _IntToFPUnit_io_resp_bits_uop_prs1_busy;
wire _IntToFPUnit_io_resp_bits_uop_prs2_busy;
wire _IntToFPUnit_io_resp_bits_uop_prs3_busy;
wire _IntToFPUnit_io_resp_bits_uop_ppred_busy;
wire [5:0] _IntToFPUnit_io_resp_bits_uop_stale_pdst;
wire _IntToFPUnit_io_resp_bits_uop_exception;
wire [63:0] _IntToFPUnit_io_resp_bits_uop_exc_cause;
wire _IntToFPUnit_io_resp_bits_uop_bypassable;
wire [4:0] _IntToFPUnit_io_resp_bits_uop_mem_cmd;
wire [1:0] _IntToFPUnit_io_resp_bits_uop_mem_size;
wire _IntToFPUnit_io_resp_bits_uop_mem_signed;
wire _IntToFPUnit_io_resp_bits_uop_is_fence;
wire _IntToFPUnit_io_resp_bits_uop_is_fencei;
wire _IntToFPUnit_io_resp_bits_uop_is_amo;
wire _IntToFPUnit_io_resp_bits_uop_uses_ldq;
wire _IntToFPUnit_io_resp_bits_uop_uses_stq;
wire _IntToFPUnit_io_resp_bits_uop_is_sys_pc2epc;
wire _IntToFPUnit_io_resp_bits_uop_is_unique;
wire _IntToFPUnit_io_resp_bits_uop_flush_on_commit;
wire _IntToFPUnit_io_resp_bits_uop_ldst_is_rs1;
wire [5:0] _IntToFPUnit_io_resp_bits_uop_ldst;
wire [5:0] _IntToFPUnit_io_resp_bits_uop_lrs1;
wire [5:0] _IntToFPUnit_io_resp_bits_uop_lrs2;
wire [5:0] _IntToFPUnit_io_resp_bits_uop_lrs3;
wire _IntToFPUnit_io_resp_bits_uop_ldst_val;
wire [1:0] _IntToFPUnit_io_resp_bits_uop_dst_rtype;
wire [1:0] _IntToFPUnit_io_resp_bits_uop_lrs1_rtype;
wire [1:0] _IntToFPUnit_io_resp_bits_uop_lrs2_rtype;
wire _IntToFPUnit_io_resp_bits_uop_frs3_en;
wire _IntToFPUnit_io_resp_bits_uop_fp_val;
wire _IntToFPUnit_io_resp_bits_uop_fp_single;
wire _IntToFPUnit_io_resp_bits_uop_xcpt_pf_if;
wire _IntToFPUnit_io_resp_bits_uop_xcpt_ae_if;
wire _IntToFPUnit_io_resp_bits_uop_xcpt_ma_if;
wire _IntToFPUnit_io_resp_bits_uop_bp_debug_if;
wire _IntToFPUnit_io_resp_bits_uop_bp_xcpt_if;
wire [1:0] _IntToFPUnit_io_resp_bits_uop_debug_fsrc;
wire [1:0] _IntToFPUnit_io_resp_bits_uop_debug_tsrc;
wire [64:0] _IntToFPUnit_io_resp_bits_data;
wire _IntToFPUnit_io_resp_bits_fflags_valid;
wire [6:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_uopc;
wire [31:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_inst;
wire [31:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_debug_inst;
wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_rvc;
wire [39:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_debug_pc;
wire [2:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_iq_type;
wire [9:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_fu_code;
wire [3:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_br_type;
wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_op1_sel;
wire [2:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_op2_sel;
wire [2:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_imm_sel;
wire [4:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_op_fcn;
wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_fcn_dw;
wire [2:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_csr_cmd;
  // ---- Internal nets (auto-generated Chisel/FIRRTL output; names mirror the
  // instance ports they connect to) ----
  // Micro-op fields carried alongside the floating-point exception flags
  // (fflags) of a completing IntToFPUnit response.
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_is_load;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_is_sta;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_is_std;
  wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_iw_state;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_iw_p1_poisoned;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_iw_p2_poisoned;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_br;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_jalr;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_jal;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_sfb;
  wire [7:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_br_mask;
  wire [2:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_br_tag;
  wire [3:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ftq_idx;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_edge_inst;
  wire [5:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_pc_lob;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_taken;
  wire [19:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_imm_packed;
  wire [11:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_csr_addr;
  wire [4:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_rob_idx;
  wire [2:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ldq_idx;
  wire [2:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_stq_idx;
  wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_rxq_idx;
  wire [5:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_pdst;
  wire [5:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_prs1;
  wire [5:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_prs2;
  wire [5:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_prs3;
  wire [3:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ppred;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_prs1_busy;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_prs2_busy;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_prs3_busy;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_ppred_busy;
  wire [5:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_stale_pdst;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_exception;
  wire [63:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_exc_cause;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_bypassable;
  wire [4:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_mem_cmd;
  wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_mem_size;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_mem_signed;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_fence;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_fencei;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_amo;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_uses_ldq;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_uses_stq;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_sys_pc2epc;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_unique;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_flush_on_commit;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_ldst_is_rs1;
  wire [5:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ldst;
  wire [5:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs1;
  wire [5:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs2;
  wire [5:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs3;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_ldst_val;
  wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_dst_rtype;
  wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs1_rtype;
  wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs2_rtype;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_frs3_en;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_fp_val;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_fp_single;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_xcpt_pf_if;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_xcpt_ae_if;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_xcpt_ma_if;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_bp_debug_if;
  wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_bp_xcpt_if;
  wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_debug_fsrc;
  wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_debug_tsrc;
  // The 5 IEEE exception flag bits themselves.
  wire [4:0] _IntToFPUnit_io_resp_bits_fflags_bits_flags;
  // Response of the pipelined integer multiplier (valid, writeback uop
  // fields, and 64-bit result data).
  wire _PipelinedMulUnit_io_resp_valid;
  wire [4:0] _PipelinedMulUnit_io_resp_bits_uop_rob_idx;
  wire [5:0] _PipelinedMulUnit_io_resp_bits_uop_pdst;
  wire _PipelinedMulUnit_io_resp_bits_uop_bypassable;
  wire _PipelinedMulUnit_io_resp_bits_uop_is_amo;
  wire _PipelinedMulUnit_io_resp_bits_uop_uses_stq;
  wire [1:0] _PipelinedMulUnit_io_resp_bits_uop_dst_rtype;
  wire [63:0] _PipelinedMulUnit_io_resp_bits_data;
  // Response of the ALU (valid, writeback uop fields, result data) plus its
  // three per-stage 64-bit bypass data outputs.
  wire _ALUUnit_io_resp_valid;
  wire [19:0] _ALUUnit_io_resp_bits_uop_imm_packed;
  wire [4:0] _ALUUnit_io_resp_bits_uop_rob_idx;
  wire [5:0] _ALUUnit_io_resp_bits_uop_pdst;
  wire _ALUUnit_io_resp_bits_uop_bypassable;
  wire _ALUUnit_io_resp_bits_uop_is_amo;
  wire _ALUUnit_io_resp_bits_uop_uses_stq;
  wire [1:0] _ALUUnit_io_resp_bits_uop_dst_rtype;
  wire [63:0] _ALUUnit_io_resp_bits_data;
  wire [63:0] _ALUUnit_io_bypass_0_bits_data;
  wire [63:0] _ALUUnit_io_bypass_1_bits_data;
  wire [63:0] _ALUUnit_io_bypass_2_bits_data;
  // An integer response is produced this cycle when either the ALU or the
  // pipelined multiplier completes.
  wire _io_iresp_valid_T = _ALUUnit_io_resp_valid | _PipelinedMulUnit_io_resp_valid;
  // Divider is busy while it cannot accept a request, or when a new incoming
  // request targets it. fu_code[4] appears to select the divide unit here --
  // NOTE(review): confirm against the BOOM FUConstants encoding.
  assign div_busy = ~_DivUnit_io_req_ready | io_req_valid & io_req_bits_uop_fu_code[4];
  // Single-cycle ALU pipe. The request fires only for fu_code values 10'h1,
  // 10'h2, or 10'h20 (the 10'h20 case additionally excludes uopc 7'h6C).
  // NOTE(review): the meaning of these fu_code/uopc constants comes from the
  // generator's FU encoding -- confirm before relying on them.
  // Besides the writeback response, this instance drives the three bypass
  // data outputs, branch-resolution info (io_brinfo_*), the FTQ PC lookup
  // (io_get_ftq_pc_*), and forwards its CSR command straight to the
  // module-level io_iresp_bits_uop_ctrl_csr_cmd output.
  ALUUnit ALUUnit (
    .clock (clock),
    .reset (reset),
    .io_req_valid (io_req_valid & (io_req_bits_uop_fu_code == 10'h1 | io_req_bits_uop_fu_code == 10'h2 | io_req_bits_uop_fu_code == 10'h20 & io_req_bits_uop_uopc != 7'h6C)),
    .io_req_bits_uop_uopc (io_req_bits_uop_uopc),
    .io_req_bits_uop_is_rvc (io_req_bits_uop_is_rvc),
    .io_req_bits_uop_ctrl_br_type (io_req_bits_uop_ctrl_br_type),
    .io_req_bits_uop_ctrl_op1_sel (io_req_bits_uop_ctrl_op1_sel),
    .io_req_bits_uop_ctrl_op2_sel (io_req_bits_uop_ctrl_op2_sel),
    .io_req_bits_uop_ctrl_imm_sel (io_req_bits_uop_ctrl_imm_sel),
    .io_req_bits_uop_ctrl_op_fcn (io_req_bits_uop_ctrl_op_fcn),
    .io_req_bits_uop_ctrl_fcn_dw (io_req_bits_uop_ctrl_fcn_dw),
    .io_req_bits_uop_ctrl_csr_cmd (io_req_bits_uop_ctrl_csr_cmd),
    .io_req_bits_uop_is_br (io_req_bits_uop_is_br),
    .io_req_bits_uop_is_jalr (io_req_bits_uop_is_jalr),
    .io_req_bits_uop_is_jal (io_req_bits_uop_is_jal),
    .io_req_bits_uop_is_sfb (io_req_bits_uop_is_sfb),
    .io_req_bits_uop_br_mask (io_req_bits_uop_br_mask),
    .io_req_bits_uop_br_tag (io_req_bits_uop_br_tag),
    .io_req_bits_uop_ftq_idx (io_req_bits_uop_ftq_idx),
    .io_req_bits_uop_edge_inst (io_req_bits_uop_edge_inst),
    .io_req_bits_uop_pc_lob (io_req_bits_uop_pc_lob),
    .io_req_bits_uop_taken (io_req_bits_uop_taken),
    .io_req_bits_uop_imm_packed (io_req_bits_uop_imm_packed),
    .io_req_bits_uop_rob_idx (io_req_bits_uop_rob_idx),
    .io_req_bits_uop_ldq_idx (io_req_bits_uop_ldq_idx),
    .io_req_bits_uop_stq_idx (io_req_bits_uop_stq_idx),
    .io_req_bits_uop_pdst (io_req_bits_uop_pdst),
    .io_req_bits_uop_prs1 (io_req_bits_uop_prs1),
    .io_req_bits_uop_bypassable (io_req_bits_uop_bypassable),
    .io_req_bits_uop_is_amo (io_req_bits_uop_is_amo),
    .io_req_bits_uop_uses_stq (io_req_bits_uop_uses_stq),
    .io_req_bits_uop_dst_rtype (io_req_bits_uop_dst_rtype),
    // Operands are truncated to the low 64 bits of the register-read data.
    .io_req_bits_rs1_data (io_req_bits_rs1_data[63:0]),
    .io_req_bits_rs2_data (io_req_bits_rs2_data[63:0]),
    .io_req_bits_kill (io_req_bits_kill),
    .io_resp_valid (_ALUUnit_io_resp_valid),
    .io_resp_bits_uop_ctrl_csr_cmd (io_iresp_bits_uop_ctrl_csr_cmd),
    .io_resp_bits_uop_imm_packed (_ALUUnit_io_resp_bits_uop_imm_packed),
    .io_resp_bits_uop_rob_idx (_ALUUnit_io_resp_bits_uop_rob_idx),
    .io_resp_bits_uop_pdst (_ALUUnit_io_resp_bits_uop_pdst),
    .io_resp_bits_uop_bypassable (_ALUUnit_io_resp_bits_uop_bypassable),
    .io_resp_bits_uop_is_amo (_ALUUnit_io_resp_bits_uop_is_amo),
    .io_resp_bits_uop_uses_stq (_ALUUnit_io_resp_bits_uop_uses_stq),
    .io_resp_bits_uop_dst_rtype (_ALUUnit_io_resp_bits_uop_dst_rtype),
    .io_resp_bits_data (_ALUUnit_io_resp_bits_data),
    .io_brupdate_b1_resolve_mask (io_brupdate_b1_resolve_mask),
    .io_brupdate_b1_mispredict_mask (io_brupdate_b1_mispredict_mask),
    .io_bypass_0_valid (io_bypass_0_valid),
    .io_bypass_0_bits_uop_pdst (io_bypass_0_bits_uop_pdst),
    .io_bypass_0_bits_uop_dst_rtype (io_bypass_0_bits_uop_dst_rtype),
    .io_bypass_0_bits_data (_ALUUnit_io_bypass_0_bits_data),
    .io_bypass_1_valid (io_bypass_1_valid),
    .io_bypass_1_bits_uop_pdst (io_bypass_1_bits_uop_pdst),
    .io_bypass_1_bits_uop_dst_rtype (io_bypass_1_bits_uop_dst_rtype),
    .io_bypass_1_bits_data (_ALUUnit_io_bypass_1_bits_data),
    .io_bypass_2_valid (io_bypass_2_valid),
    .io_bypass_2_bits_uop_pdst (io_bypass_2_bits_uop_pdst),
    .io_bypass_2_bits_uop_dst_rtype (io_bypass_2_bits_uop_dst_rtype),
    .io_bypass_2_bits_data (_ALUUnit_io_bypass_2_bits_data),
    // Branch resolution information, driven directly onto module outputs.
    .io_brinfo_uop_is_rvc (io_brinfo_uop_is_rvc),
    .io_brinfo_uop_br_mask (io_brinfo_uop_br_mask),
    .io_brinfo_uop_br_tag (io_brinfo_uop_br_tag),
    .io_brinfo_uop_ftq_idx (io_brinfo_uop_ftq_idx),
    .io_brinfo_uop_edge_inst (io_brinfo_uop_edge_inst),
    .io_brinfo_uop_pc_lob (io_brinfo_uop_pc_lob),
    .io_brinfo_uop_rob_idx (io_brinfo_uop_rob_idx),
    .io_brinfo_uop_ldq_idx (io_brinfo_uop_ldq_idx),
    .io_brinfo_uop_stq_idx (io_brinfo_uop_stq_idx),
    .io_brinfo_valid (io_brinfo_valid),
    .io_brinfo_mispredict (io_brinfo_mispredict),
    .io_brinfo_taken (io_brinfo_taken),
    .io_brinfo_cfi_type (io_brinfo_cfi_type),
    .io_brinfo_pc_sel (io_brinfo_pc_sel),
    .io_brinfo_jalr_target (io_brinfo_jalr_target),
    .io_brinfo_target_offset (io_brinfo_target_offset),
    // Fetch-target-queue PC lookup used for branch target computation.
    .io_get_ftq_pc_entry_cfi_idx_valid (io_get_ftq_pc_entry_cfi_idx_valid),
    .io_get_ftq_pc_entry_cfi_idx_bits (io_get_ftq_pc_entry_cfi_idx_bits),
    .io_get_ftq_pc_entry_start_bank (io_get_ftq_pc_entry_start_bank),
    .io_get_ftq_pc_pc (io_get_ftq_pc_pc),
    .io_get_ftq_pc_next_val (io_get_ftq_pc_next_val),
    .io_get_ftq_pc_next_pc (io_get_ftq_pc_next_pc)
  );
  // Pipelined integer multiplier. The request fires when fu_code bit 3 is
  // set; its response (valid, writeback uop fields, 64-bit result) comes out
  // on the _PipelinedMulUnit_* wires and is combined with the ALU response
  // for the integer writeback.
  PipelinedMulUnit PipelinedMulUnit (
    .clock (clock),
    .reset (reset),
    .io_req_valid (io_req_valid & io_req_bits_uop_fu_code[3]),
    .io_req_bits_uop_ctrl_op_fcn (io_req_bits_uop_ctrl_op_fcn),
    .io_req_bits_uop_ctrl_fcn_dw (io_req_bits_uop_ctrl_fcn_dw),
    .io_req_bits_uop_br_mask (io_req_bits_uop_br_mask),
    .io_req_bits_uop_rob_idx (io_req_bits_uop_rob_idx),
    .io_req_bits_uop_pdst (io_req_bits_uop_pdst),
    .io_req_bits_uop_bypassable (io_req_bits_uop_bypassable),
    .io_req_bits_uop_is_amo (io_req_bits_uop_is_amo),
    .io_req_bits_uop_uses_stq (io_req_bits_uop_uses_stq),
    .io_req_bits_uop_dst_rtype (io_req_bits_uop_dst_rtype),
    // Operands are truncated to the low 64 bits of the register-read data.
    .io_req_bits_rs1_data (io_req_bits_rs1_data[63:0]),
    .io_req_bits_rs2_data (io_req_bits_rs2_data[63:0]),
    .io_req_bits_kill (io_req_bits_kill),
    .io_resp_valid (_PipelinedMulUnit_io_resp_valid),
    .io_resp_bits_uop_rob_idx (_PipelinedMulUnit_io_resp_bits_uop_rob_idx),
    .io_resp_bits_uop_pdst (_PipelinedMulUnit_io_resp_bits_uop_pdst),
    .io_resp_bits_uop_bypassable (_PipelinedMulUnit_io_resp_bits_uop_bypassable),
    .io_resp_bits_uop_is_amo (_PipelinedMulUnit_io_resp_bits_uop_is_amo),
    .io_resp_bits_uop_uses_stq (_PipelinedMulUnit_io_resp_bits_uop_uses_stq),
    .io_resp_bits_uop_dst_rtype (_PipelinedMulUnit_io_resp_bits_uop_dst_rtype),
    .io_resp_bits_data (_PipelinedMulUnit_io_resp_bits_data),
    // Branch-update masks used internally to kill in-flight mispredicted ops.
    .io_brupdate_b1_resolve_mask (io_brupdate_b1_resolve_mask),
    .io_brupdate_b1_mispredict_mask (io_brupdate_b1_mispredict_mask)
  );
  // Integer-to-floating-point conversion unit. The request fires when
  // fu_code bit 8 is set; only rs1 data is supplied (full width, no
  // truncation), together with the dynamic rounding mode io_fcsr_rm.
  // The response carries the complete micro-op, the converted data, and the
  // floating-point exception flags (fflags), all exported on the
  // _IntToFPUnit_* wires.
  IntToFPUnit IntToFPUnit (
    .clock (clock),
    .reset (reset),
    .io_req_valid (io_req_valid & io_req_bits_uop_fu_code[8]),
    .io_req_bits_uop_uopc (io_req_bits_uop_uopc),
    .io_req_bits_uop_inst (io_req_bits_uop_inst),
    .io_req_bits_uop_debug_inst (io_req_bits_uop_debug_inst),
    .io_req_bits_uop_is_rvc (io_req_bits_uop_is_rvc),
    .io_req_bits_uop_debug_pc (io_req_bits_uop_debug_pc),
    .io_req_bits_uop_iq_type (io_req_bits_uop_iq_type),
    .io_req_bits_uop_fu_code (io_req_bits_uop_fu_code),
    .io_req_bits_uop_ctrl_br_type (io_req_bits_uop_ctrl_br_type),
    .io_req_bits_uop_ctrl_op1_sel (io_req_bits_uop_ctrl_op1_sel),
    .io_req_bits_uop_ctrl_op2_sel (io_req_bits_uop_ctrl_op2_sel),
    .io_req_bits_uop_ctrl_imm_sel (io_req_bits_uop_ctrl_imm_sel),
    .io_req_bits_uop_ctrl_op_fcn (io_req_bits_uop_ctrl_op_fcn),
    .io_req_bits_uop_ctrl_fcn_dw (io_req_bits_uop_ctrl_fcn_dw),
    .io_req_bits_uop_ctrl_csr_cmd (io_req_bits_uop_ctrl_csr_cmd),
    .io_req_bits_uop_ctrl_is_load (io_req_bits_uop_ctrl_is_load),
    .io_req_bits_uop_ctrl_is_sta (io_req_bits_uop_ctrl_is_sta),
    .io_req_bits_uop_ctrl_is_std (io_req_bits_uop_ctrl_is_std),
    .io_req_bits_uop_iw_state (io_req_bits_uop_iw_state),
    .io_req_bits_uop_is_br (io_req_bits_uop_is_br),
    .io_req_bits_uop_is_jalr (io_req_bits_uop_is_jalr),
    .io_req_bits_uop_is_jal (io_req_bits_uop_is_jal),
    .io_req_bits_uop_is_sfb (io_req_bits_uop_is_sfb),
    .io_req_bits_uop_br_mask (io_req_bits_uop_br_mask),
    .io_req_bits_uop_br_tag (io_req_bits_uop_br_tag),
    .io_req_bits_uop_ftq_idx (io_req_bits_uop_ftq_idx),
    .io_req_bits_uop_edge_inst (io_req_bits_uop_edge_inst),
    .io_req_bits_uop_pc_lob (io_req_bits_uop_pc_lob),
    .io_req_bits_uop_taken (io_req_bits_uop_taken),
    .io_req_bits_uop_imm_packed (io_req_bits_uop_imm_packed),
    .io_req_bits_uop_csr_addr (io_req_bits_uop_csr_addr),
    .io_req_bits_uop_rob_idx (io_req_bits_uop_rob_idx),
    .io_req_bits_uop_ldq_idx (io_req_bits_uop_ldq_idx),
    .io_req_bits_uop_stq_idx (io_req_bits_uop_stq_idx),
    .io_req_bits_uop_rxq_idx (io_req_bits_uop_rxq_idx),
    .io_req_bits_uop_pdst (io_req_bits_uop_pdst),
    .io_req_bits_uop_prs1 (io_req_bits_uop_prs1),
    .io_req_bits_uop_prs2 (io_req_bits_uop_prs2),
    .io_req_bits_uop_prs3 (io_req_bits_uop_prs3),
    .io_req_bits_uop_ppred (io_req_bits_uop_ppred),
    .io_req_bits_uop_prs1_busy (io_req_bits_uop_prs1_busy),
    .io_req_bits_uop_prs2_busy (io_req_bits_uop_prs2_busy),
    .io_req_bits_uop_prs3_busy (io_req_bits_uop_prs3_busy),
    .io_req_bits_uop_ppred_busy (io_req_bits_uop_ppred_busy),
    .io_req_bits_uop_stale_pdst (io_req_bits_uop_stale_pdst),
    .io_req_bits_uop_exception (io_req_bits_uop_exception),
    .io_req_bits_uop_exc_cause (io_req_bits_uop_exc_cause),
    .io_req_bits_uop_bypassable (io_req_bits_uop_bypassable),
    .io_req_bits_uop_mem_cmd (io_req_bits_uop_mem_cmd),
    .io_req_bits_uop_mem_size (io_req_bits_uop_mem_size),
    .io_req_bits_uop_mem_signed (io_req_bits_uop_mem_signed),
    .io_req_bits_uop_is_fence (io_req_bits_uop_is_fence),
    .io_req_bits_uop_is_fencei (io_req_bits_uop_is_fencei),
    .io_req_bits_uop_is_amo (io_req_bits_uop_is_amo),
    .io_req_bits_uop_uses_ldq (io_req_bits_uop_uses_ldq),
    .io_req_bits_uop_uses_stq (io_req_bits_uop_uses_stq),
    .io_req_bits_uop_is_sys_pc2epc (io_req_bits_uop_is_sys_pc2epc),
    .io_req_bits_uop_is_unique (io_req_bits_uop_is_unique),
    .io_req_bits_uop_flush_on_commit (io_req_bits_uop_flush_on_commit),
    .io_req_bits_uop_ldst_is_rs1 (io_req_bits_uop_ldst_is_rs1),
    .io_req_bits_uop_ldst (io_req_bits_uop_ldst),
    .io_req_bits_uop_lrs1 (io_req_bits_uop_lrs1),
    .io_req_bits_uop_lrs2 (io_req_bits_uop_lrs2),
    .io_req_bits_uop_lrs3 (io_req_bits_uop_lrs3),
    .io_req_bits_uop_ldst_val (io_req_bits_uop_ldst_val),
    .io_req_bits_uop_dst_rtype (io_req_bits_uop_dst_rtype),
    .io_req_bits_uop_lrs1_rtype (io_req_bits_uop_lrs1_rtype),
    .io_req_bits_uop_lrs2_rtype (io_req_bits_uop_lrs2_rtype),
    .io_req_bits_uop_frs3_en (io_req_bits_uop_frs3_en),
    .io_req_bits_uop_fp_val (io_req_bits_uop_fp_val),
    .io_req_bits_uop_fp_single (io_req_bits_uop_fp_single),
    .io_req_bits_uop_xcpt_pf_if (io_req_bits_uop_xcpt_pf_if),
    .io_req_bits_uop_xcpt_ae_if (io_req_bits_uop_xcpt_ae_if),
    .io_req_bits_uop_xcpt_ma_if (io_req_bits_uop_xcpt_ma_if),
    .io_req_bits_uop_bp_debug_if (io_req_bits_uop_bp_debug_if),
    .io_req_bits_uop_bp_xcpt_if (io_req_bits_uop_bp_xcpt_if),
    .io_req_bits_uop_debug_fsrc (io_req_bits_uop_debug_fsrc),
    .io_req_bits_uop_debug_tsrc (io_req_bits_uop_debug_tsrc),
    .io_req_bits_rs1_data (io_req_bits_rs1_data),
    .io_req_bits_kill (io_req_bits_kill),
    // Response: full micro-op + converted data.
    .io_resp_valid (_IntToFPUnit_io_resp_valid),
    .io_resp_bits_uop_uopc (_IntToFPUnit_io_resp_bits_uop_uopc),
    .io_resp_bits_uop_inst (_IntToFPUnit_io_resp_bits_uop_inst),
    .io_resp_bits_uop_debug_inst (_IntToFPUnit_io_resp_bits_uop_debug_inst),
    .io_resp_bits_uop_is_rvc (_IntToFPUnit_io_resp_bits_uop_is_rvc),
    .io_resp_bits_uop_debug_pc (_IntToFPUnit_io_resp_bits_uop_debug_pc),
    .io_resp_bits_uop_iq_type (_IntToFPUnit_io_resp_bits_uop_iq_type),
    .io_resp_bits_uop_fu_code (_IntToFPUnit_io_resp_bits_uop_fu_code),
    .io_resp_bits_uop_ctrl_br_type (_IntToFPUnit_io_resp_bits_uop_ctrl_br_type),
    .io_resp_bits_uop_ctrl_op1_sel (_IntToFPUnit_io_resp_bits_uop_ctrl_op1_sel),
    .io_resp_bits_uop_ctrl_op2_sel (_IntToFPUnit_io_resp_bits_uop_ctrl_op2_sel),
    .io_resp_bits_uop_ctrl_imm_sel (_IntToFPUnit_io_resp_bits_uop_ctrl_imm_sel),
    .io_resp_bits_uop_ctrl_op_fcn (_IntToFPUnit_io_resp_bits_uop_ctrl_op_fcn),
    .io_resp_bits_uop_ctrl_fcn_dw (_IntToFPUnit_io_resp_bits_uop_ctrl_fcn_dw),
    .io_resp_bits_uop_ctrl_csr_cmd (_IntToFPUnit_io_resp_bits_uop_ctrl_csr_cmd),
    .io_resp_bits_uop_ctrl_is_load (_IntToFPUnit_io_resp_bits_uop_ctrl_is_load),
    .io_resp_bits_uop_ctrl_is_sta (_IntToFPUnit_io_resp_bits_uop_ctrl_is_sta),
    .io_resp_bits_uop_ctrl_is_std (_IntToFPUnit_io_resp_bits_uop_ctrl_is_std),
    .io_resp_bits_uop_iw_state (_IntToFPUnit_io_resp_bits_uop_iw_state),
    .io_resp_bits_uop_iw_p1_poisoned (_IntToFPUnit_io_resp_bits_uop_iw_p1_poisoned),
    .io_resp_bits_uop_iw_p2_poisoned (_IntToFPUnit_io_resp_bits_uop_iw_p2_poisoned),
    .io_resp_bits_uop_is_br (_IntToFPUnit_io_resp_bits_uop_is_br),
    .io_resp_bits_uop_is_jalr (_IntToFPUnit_io_resp_bits_uop_is_jalr),
    .io_resp_bits_uop_is_jal (_IntToFPUnit_io_resp_bits_uop_is_jal),
    .io_resp_bits_uop_is_sfb (_IntToFPUnit_io_resp_bits_uop_is_sfb),
    .io_resp_bits_uop_br_mask (_IntToFPUnit_io_resp_bits_uop_br_mask),
    .io_resp_bits_uop_br_tag (_IntToFPUnit_io_resp_bits_uop_br_tag),
    .io_resp_bits_uop_ftq_idx (_IntToFPUnit_io_resp_bits_uop_ftq_idx),
    .io_resp_bits_uop_edge_inst (_IntToFPUnit_io_resp_bits_uop_edge_inst),
    .io_resp_bits_uop_pc_lob (_IntToFPUnit_io_resp_bits_uop_pc_lob),
    .io_resp_bits_uop_taken (_IntToFPUnit_io_resp_bits_uop_taken),
    .io_resp_bits_uop_imm_packed (_IntToFPUnit_io_resp_bits_uop_imm_packed),
    .io_resp_bits_uop_csr_addr (_IntToFPUnit_io_resp_bits_uop_csr_addr),
    .io_resp_bits_uop_rob_idx (_IntToFPUnit_io_resp_bits_uop_rob_idx),
    .io_resp_bits_uop_ldq_idx (_IntToFPUnit_io_resp_bits_uop_ldq_idx),
    .io_resp_bits_uop_stq_idx (_IntToFPUnit_io_resp_bits_uop_stq_idx),
    .io_resp_bits_uop_rxq_idx (_IntToFPUnit_io_resp_bits_uop_rxq_idx),
    .io_resp_bits_uop_pdst (_IntToFPUnit_io_resp_bits_uop_pdst),
    .io_resp_bits_uop_prs1 (_IntToFPUnit_io_resp_bits_uop_prs1),
    .io_resp_bits_uop_prs2 (_IntToFPUnit_io_resp_bits_uop_prs2),
    .io_resp_bits_uop_prs3 (_IntToFPUnit_io_resp_bits_uop_prs3),
    .io_resp_bits_uop_ppred (_IntToFPUnit_io_resp_bits_uop_ppred),
    .io_resp_bits_uop_prs1_busy (_IntToFPUnit_io_resp_bits_uop_prs1_busy),
    .io_resp_bits_uop_prs2_busy (_IntToFPUnit_io_resp_bits_uop_prs2_busy),
    .io_resp_bits_uop_prs3_busy (_IntToFPUnit_io_resp_bits_uop_prs3_busy),
    .io_resp_bits_uop_ppred_busy (_IntToFPUnit_io_resp_bits_uop_ppred_busy),
    .io_resp_bits_uop_stale_pdst (_IntToFPUnit_io_resp_bits_uop_stale_pdst),
    .io_resp_bits_uop_exception (_IntToFPUnit_io_resp_bits_uop_exception),
    .io_resp_bits_uop_exc_cause (_IntToFPUnit_io_resp_bits_uop_exc_cause),
    .io_resp_bits_uop_bypassable (_IntToFPUnit_io_resp_bits_uop_bypassable),
    .io_resp_bits_uop_mem_cmd (_IntToFPUnit_io_resp_bits_uop_mem_cmd),
    .io_resp_bits_uop_mem_size (_IntToFPUnit_io_resp_bits_uop_mem_size),
    .io_resp_bits_uop_mem_signed (_IntToFPUnit_io_resp_bits_uop_mem_signed),
    .io_resp_bits_uop_is_fence (_IntToFPUnit_io_resp_bits_uop_is_fence),
    .io_resp_bits_uop_is_fencei (_IntToFPUnit_io_resp_bits_uop_is_fencei),
    .io_resp_bits_uop_is_amo (_IntToFPUnit_io_resp_bits_uop_is_amo),
    .io_resp_bits_uop_uses_ldq (_IntToFPUnit_io_resp_bits_uop_uses_ldq),
    .io_resp_bits_uop_uses_stq (_IntToFPUnit_io_resp_bits_uop_uses_stq),
    .io_resp_bits_uop_is_sys_pc2epc (_IntToFPUnit_io_resp_bits_uop_is_sys_pc2epc),
    .io_resp_bits_uop_is_unique (_IntToFPUnit_io_resp_bits_uop_is_unique),
    .io_resp_bits_uop_flush_on_commit (_IntToFPUnit_io_resp_bits_uop_flush_on_commit),
    .io_resp_bits_uop_ldst_is_rs1 (_IntToFPUnit_io_resp_bits_uop_ldst_is_rs1),
    .io_resp_bits_uop_ldst (_IntToFPUnit_io_resp_bits_uop_ldst),
    .io_resp_bits_uop_lrs1 (_IntToFPUnit_io_resp_bits_uop_lrs1),
    .io_resp_bits_uop_lrs2 (_IntToFPUnit_io_resp_bits_uop_lrs2),
    .io_resp_bits_uop_lrs3 (_IntToFPUnit_io_resp_bits_uop_lrs3),
    .io_resp_bits_uop_ldst_val (_IntToFPUnit_io_resp_bits_uop_ldst_val),
    .io_resp_bits_uop_dst_rtype (_IntToFPUnit_io_resp_bits_uop_dst_rtype),
    .io_resp_bits_uop_lrs1_rtype (_IntToFPUnit_io_resp_bits_uop_lrs1_rtype),
    .io_resp_bits_uop_lrs2_rtype (_IntToFPUnit_io_resp_bits_uop_lrs2_rtype),
    .io_resp_bits_uop_frs3_en (_IntToFPUnit_io_resp_bits_uop_frs3_en),
    .io_resp_bits_uop_fp_val (_IntToFPUnit_io_resp_bits_uop_fp_val),
    .io_resp_bits_uop_fp_single (_IntToFPUnit_io_resp_bits_uop_fp_single),
    .io_resp_bits_uop_xcpt_pf_if (_IntToFPUnit_io_resp_bits_uop_xcpt_pf_if),
    .io_resp_bits_uop_xcpt_ae_if (_IntToFPUnit_io_resp_bits_uop_xcpt_ae_if),
    .io_resp_bits_uop_xcpt_ma_if (_IntToFPUnit_io_resp_bits_uop_xcpt_ma_if),
    .io_resp_bits_uop_bp_debug_if (_IntToFPUnit_io_resp_bits_uop_bp_debug_if),
    .io_resp_bits_uop_bp_xcpt_if (_IntToFPUnit_io_resp_bits_uop_bp_xcpt_if),
    .io_resp_bits_uop_debug_fsrc (_IntToFPUnit_io_resp_bits_uop_debug_fsrc),
    .io_resp_bits_uop_debug_tsrc (_IntToFPUnit_io_resp_bits_uop_debug_tsrc),
    .io_resp_bits_data (_IntToFPUnit_io_resp_bits_data),
    // Floating-point exception flags, tagged with the producing micro-op.
    .io_resp_bits_fflags_valid (_IntToFPUnit_io_resp_bits_fflags_valid),
    .io_resp_bits_fflags_bits_uop_uopc (_IntToFPUnit_io_resp_bits_fflags_bits_uop_uopc),
    .io_resp_bits_fflags_bits_uop_inst (_IntToFPUnit_io_resp_bits_fflags_bits_uop_inst),
    .io_resp_bits_fflags_bits_uop_debug_inst (_IntToFPUnit_io_resp_bits_fflags_bits_uop_debug_inst),
    .io_resp_bits_fflags_bits_uop_is_rvc (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_rvc),
    .io_resp_bits_fflags_bits_uop_debug_pc (_IntToFPUnit_io_resp_bits_fflags_bits_uop_debug_pc),
    .io_resp_bits_fflags_bits_uop_iq_type (_IntToFPUnit_io_resp_bits_fflags_bits_uop_iq_type),
    .io_resp_bits_fflags_bits_uop_fu_code (_IntToFPUnit_io_resp_bits_fflags_bits_uop_fu_code),
    .io_resp_bits_fflags_bits_uop_ctrl_br_type (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_br_type),
    .io_resp_bits_fflags_bits_uop_ctrl_op1_sel (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_op1_sel),
    .io_resp_bits_fflags_bits_uop_ctrl_op2_sel (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_op2_sel),
    .io_resp_bits_fflags_bits_uop_ctrl_imm_sel (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_imm_sel),
    .io_resp_bits_fflags_bits_uop_ctrl_op_fcn (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_op_fcn),
    .io_resp_bits_fflags_bits_uop_ctrl_fcn_dw (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_fcn_dw),
    .io_resp_bits_fflags_bits_uop_ctrl_csr_cmd (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_csr_cmd),
    .io_resp_bits_fflags_bits_uop_ctrl_is_load (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_is_load),
    .io_resp_bits_fflags_bits_uop_ctrl_is_sta (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_is_sta),
    .io_resp_bits_fflags_bits_uop_ctrl_is_std (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_is_std),
    .io_resp_bits_fflags_bits_uop_iw_state (_IntToFPUnit_io_resp_bits_fflags_bits_uop_iw_state),
    .io_resp_bits_fflags_bits_uop_iw_p1_poisoned (_IntToFPUnit_io_resp_bits_fflags_bits_uop_iw_p1_poisoned),
    .io_resp_bits_fflags_bits_uop_iw_p2_poisoned (_IntToFPUnit_io_resp_bits_fflags_bits_uop_iw_p2_poisoned),
    .io_resp_bits_fflags_bits_uop_is_br (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_br),
    .io_resp_bits_fflags_bits_uop_is_jalr (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_jalr),
    .io_resp_bits_fflags_bits_uop_is_jal (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_jal),
    .io_resp_bits_fflags_bits_uop_is_sfb (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_sfb),
    .io_resp_bits_fflags_bits_uop_br_mask (_IntToFPUnit_io_resp_bits_fflags_bits_uop_br_mask),
    .io_resp_bits_fflags_bits_uop_br_tag (_IntToFPUnit_io_resp_bits_fflags_bits_uop_br_tag),
    .io_resp_bits_fflags_bits_uop_ftq_idx (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ftq_idx),
    .io_resp_bits_fflags_bits_uop_edge_inst (_IntToFPUnit_io_resp_bits_fflags_bits_uop_edge_inst),
    .io_resp_bits_fflags_bits_uop_pc_lob (_IntToFPUnit_io_resp_bits_fflags_bits_uop_pc_lob),
    .io_resp_bits_fflags_bits_uop_taken (_IntToFPUnit_io_resp_bits_fflags_bits_uop_taken),
    .io_resp_bits_fflags_bits_uop_imm_packed (_IntToFPUnit_io_resp_bits_fflags_bits_uop_imm_packed),
    .io_resp_bits_fflags_bits_uop_csr_addr (_IntToFPUnit_io_resp_bits_fflags_bits_uop_csr_addr),
    .io_resp_bits_fflags_bits_uop_rob_idx (_IntToFPUnit_io_resp_bits_fflags_bits_uop_rob_idx),
    .io_resp_bits_fflags_bits_uop_ldq_idx (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ldq_idx),
    .io_resp_bits_fflags_bits_uop_stq_idx (_IntToFPUnit_io_resp_bits_fflags_bits_uop_stq_idx),
    .io_resp_bits_fflags_bits_uop_rxq_idx (_IntToFPUnit_io_resp_bits_fflags_bits_uop_rxq_idx),
    .io_resp_bits_fflags_bits_uop_pdst (_IntToFPUnit_io_resp_bits_fflags_bits_uop_pdst),
    .io_resp_bits_fflags_bits_uop_prs1 (_IntToFPUnit_io_resp_bits_fflags_bits_uop_prs1),
    .io_resp_bits_fflags_bits_uop_prs2 (_IntToFPUnit_io_resp_bits_fflags_bits_uop_prs2),
    .io_resp_bits_fflags_bits_uop_prs3 (_IntToFPUnit_io_resp_bits_fflags_bits_uop_prs3),
    .io_resp_bits_fflags_bits_uop_ppred (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ppred),
    .io_resp_bits_fflags_bits_uop_prs1_busy (_IntToFPUnit_io_resp_bits_fflags_bits_uop_prs1_busy),
    .io_resp_bits_fflags_bits_uop_prs2_busy (_IntToFPUnit_io_resp_bits_fflags_bits_uop_prs2_busy),
    .io_resp_bits_fflags_bits_uop_prs3_busy (_IntToFPUnit_io_resp_bits_fflags_bits_uop_prs3_busy),
    .io_resp_bits_fflags_bits_uop_ppred_busy (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ppred_busy),
    .io_resp_bits_fflags_bits_uop_stale_pdst (_IntToFPUnit_io_resp_bits_fflags_bits_uop_stale_pdst),
    .io_resp_bits_fflags_bits_uop_exception (_IntToFPUnit_io_resp_bits_fflags_bits_uop_exception),
    .io_resp_bits_fflags_bits_uop_exc_cause (_IntToFPUnit_io_resp_bits_fflags_bits_uop_exc_cause),
    .io_resp_bits_fflags_bits_uop_bypassable (_IntToFPUnit_io_resp_bits_fflags_bits_uop_bypassable),
    .io_resp_bits_fflags_bits_uop_mem_cmd (_IntToFPUnit_io_resp_bits_fflags_bits_uop_mem_cmd),
    .io_resp_bits_fflags_bits_uop_mem_size (_IntToFPUnit_io_resp_bits_fflags_bits_uop_mem_size),
    .io_resp_bits_fflags_bits_uop_mem_signed (_IntToFPUnit_io_resp_bits_fflags_bits_uop_mem_signed),
    .io_resp_bits_fflags_bits_uop_is_fence (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_fence),
    .io_resp_bits_fflags_bits_uop_is_fencei (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_fencei),
    .io_resp_bits_fflags_bits_uop_is_amo (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_amo),
    .io_resp_bits_fflags_bits_uop_uses_ldq (_IntToFPUnit_io_resp_bits_fflags_bits_uop_uses_ldq),
    .io_resp_bits_fflags_bits_uop_uses_stq (_IntToFPUnit_io_resp_bits_fflags_bits_uop_uses_stq),
    .io_resp_bits_fflags_bits_uop_is_sys_pc2epc (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_sys_pc2epc),
    .io_resp_bits_fflags_bits_uop_is_unique (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_unique),
    .io_resp_bits_fflags_bits_uop_flush_on_commit (_IntToFPUnit_io_resp_bits_fflags_bits_uop_flush_on_commit),
    .io_resp_bits_fflags_bits_uop_ldst_is_rs1 (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ldst_is_rs1),
    .io_resp_bits_fflags_bits_uop_ldst (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ldst),
    .io_resp_bits_fflags_bits_uop_lrs1 (_IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs1),
    .io_resp_bits_fflags_bits_uop_lrs2 (_IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs2),
    .io_resp_bits_fflags_bits_uop_lrs3 (_IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs3),
    .io_resp_bits_fflags_bits_uop_ldst_val (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ldst_val),
    .io_resp_bits_fflags_bits_uop_dst_rtype (_IntToFPUnit_io_resp_bits_fflags_bits_uop_dst_rtype),
    .io_resp_bits_fflags_bits_uop_lrs1_rtype (_IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs1_rtype),
    .io_resp_bits_fflags_bits_uop_lrs2_rtype (_IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs2_rtype),
    .io_resp_bits_fflags_bits_uop_frs3_en (_IntToFPUnit_io_resp_bits_fflags_bits_uop_frs3_en),
    .io_resp_bits_fflags_bits_uop_fp_val (_IntToFPUnit_io_resp_bits_fflags_bits_uop_fp_val),
    .io_resp_bits_fflags_bits_uop_fp_single (_IntToFPUnit_io_resp_bits_fflags_bits_uop_fp_single),
    .io_resp_bits_fflags_bits_uop_xcpt_pf_if (_IntToFPUnit_io_resp_bits_fflags_bits_uop_xcpt_pf_if),
    .io_resp_bits_fflags_bits_uop_xcpt_ae_if (_IntToFPUnit_io_resp_bits_fflags_bits_uop_xcpt_ae_if),
    .io_resp_bits_fflags_bits_uop_xcpt_ma_if (_IntToFPUnit_io_resp_bits_fflags_bits_uop_xcpt_ma_if),
    .io_resp_bits_fflags_bits_uop_bp_debug_if (_IntToFPUnit_io_resp_bits_fflags_bits_uop_bp_debug_if),
    .io_resp_bits_fflags_bits_uop_bp_xcpt_if (_IntToFPUnit_io_resp_bits_fflags_bits_uop_bp_xcpt_if),
    .io_resp_bits_fflags_bits_uop_debug_fsrc (_IntToFPUnit_io_resp_bits_fflags_bits_uop_debug_fsrc),
    .io_resp_bits_fflags_bits_uop_debug_tsrc (_IntToFPUnit_io_resp_bits_fflags_bits_uop_debug_tsrc),
    .io_resp_bits_fflags_bits_flags (_IntToFPUnit_io_resp_bits_fflags_bits_flags),
    // Branch-update masks (for killing mispredicted in-flight ops) and the
    // dynamic FP rounding mode from the fcsr.
    .io_brupdate_b1_resolve_mask (io_brupdate_b1_resolve_mask),
    .io_brupdate_b1_mispredict_mask (io_brupdate_b1_mispredict_mask),
    .io_fcsr_rm (io_fcsr_rm)
  );
BranchKillableQueue_3 queue (
.clock (clock),
.reset (reset),
.io_enq_ready (_queue_io_enq_ready),
.io_enq_valid (_IntToFPUnit_io_resp_valid),
.io_enq_bits_uop_uopc (_IntToFPUnit_io_resp_bits_uop_uopc),
.io_enq_bits_uop_inst (_IntToFPUnit_io_resp_bits_uop_inst),
.io_enq_bits_uop_debug_inst (_IntToFPUnit_io_resp_bits_uop_debug_inst),
.io_enq_bits_uop_is_rvc (_IntToFPUnit_io_resp_bits_uop_is_rvc),
.io_enq_bits_uop_debug_pc (_IntToFPUnit_io_resp_bits_uop_debug_pc),
.io_enq_bits_uop_iq_type (_IntToFPUnit_io_resp_bits_uop_iq_type),
.io_enq_bits_uop_fu_code (_IntToFPUnit_io_resp_bits_uop_fu_code),
.io_enq_bits_uop_ctrl_br_type (_IntToFPUnit_io_resp_bits_uop_ctrl_br_type),
.io_enq_bits_uop_ctrl_op1_sel (_IntToFPUnit_io_resp_bits_uop_ctrl_op1_sel),
.io_enq_bits_uop_ctrl_op2_sel (_IntToFPUnit_io_resp_bits_uop_ctrl_op2_sel),
.io_enq_bits_uop_ctrl_imm_sel (_IntToFPUnit_io_resp_bits_uop_ctrl_imm_sel),
.io_enq_bits_uop_ctrl_op_fcn (_IntToFPUnit_io_resp_bits_uop_ctrl_op_fcn),
.io_enq_bits_uop_ctrl_fcn_dw (_IntToFPUnit_io_resp_bits_uop_ctrl_fcn_dw),
.io_enq_bits_uop_ctrl_csr_cmd (_IntToFPUnit_io_resp_bits_uop_ctrl_csr_cmd),
.io_enq_bits_uop_ctrl_is_load (_IntToFPUnit_io_resp_bits_uop_ctrl_is_load),
.io_enq_bits_uop_ctrl_is_sta (_IntToFPUnit_io_resp_bits_uop_ctrl_is_sta),
.io_enq_bits_uop_ctrl_is_std (_IntToFPUnit_io_resp_bits_uop_ctrl_is_std),
.io_enq_bits_uop_iw_state (_IntToFPUnit_io_resp_bits_uop_iw_state),
.io_enq_bits_uop_iw_p1_poisoned (_IntToFPUnit_io_resp_bits_uop_iw_p1_poisoned),
.io_enq_bits_uop_iw_p2_poisoned (_IntToFPUnit_io_resp_bits_uop_iw_p2_poisoned),
.io_enq_bits_uop_is_br (_IntToFPUnit_io_resp_bits_uop_is_br),
.io_enq_bits_uop_is_jalr (_IntToFPUnit_io_resp_bits_uop_is_jalr),
.io_enq_bits_uop_is_jal (_IntToFPUnit_io_resp_bits_uop_is_jal),
.io_enq_bits_uop_is_sfb (_IntToFPUnit_io_resp_bits_uop_is_sfb),
.io_enq_bits_uop_br_mask (_IntToFPUnit_io_resp_bits_uop_br_mask),
.io_enq_bits_uop_br_tag (_IntToFPUnit_io_resp_bits_uop_br_tag),
.io_enq_bits_uop_ftq_idx (_IntToFPUnit_io_resp_bits_uop_ftq_idx),
.io_enq_bits_uop_edge_inst (_IntToFPUnit_io_resp_bits_uop_edge_inst),
.io_enq_bits_uop_pc_lob (_IntToFPUnit_io_resp_bits_uop_pc_lob),
.io_enq_bits_uop_taken (_IntToFPUnit_io_resp_bits_uop_taken),
.io_enq_bits_uop_imm_packed (_IntToFPUnit_io_resp_bits_uop_imm_packed),
.io_enq_bits_uop_csr_addr (_IntToFPUnit_io_resp_bits_uop_csr_addr),
.io_enq_bits_uop_rob_idx (_IntToFPUnit_io_resp_bits_uop_rob_idx),
.io_enq_bits_uop_ldq_idx (_IntToFPUnit_io_resp_bits_uop_ldq_idx),
.io_enq_bits_uop_stq_idx (_IntToFPUnit_io_resp_bits_uop_stq_idx),
.io_enq_bits_uop_rxq_idx (_IntToFPUnit_io_resp_bits_uop_rxq_idx),
.io_enq_bits_uop_pdst (_IntToFPUnit_io_resp_bits_uop_pdst),
.io_enq_bits_uop_prs1 (_IntToFPUnit_io_resp_bits_uop_prs1),
.io_enq_bits_uop_prs2 (_IntToFPUnit_io_resp_bits_uop_prs2),
.io_enq_bits_uop_prs3 (_IntToFPUnit_io_resp_bits_uop_prs3),
.io_enq_bits_uop_ppred (_IntToFPUnit_io_resp_bits_uop_ppred),
.io_enq_bits_uop_prs1_busy (_IntToFPUnit_io_resp_bits_uop_prs1_busy),
.io_enq_bits_uop_prs2_busy (_IntToFPUnit_io_resp_bits_uop_prs2_busy),
.io_enq_bits_uop_prs3_busy (_IntToFPUnit_io_resp_bits_uop_prs3_busy),
.io_enq_bits_uop_ppred_busy (_IntToFPUnit_io_resp_bits_uop_ppred_busy),
.io_enq_bits_uop_stale_pdst (_IntToFPUnit_io_resp_bits_uop_stale_pdst),
.io_enq_bits_uop_exception (_IntToFPUnit_io_resp_bits_uop_exception),
.io_enq_bits_uop_exc_cause (_IntToFPUnit_io_resp_bits_uop_exc_cause),
.io_enq_bits_uop_bypassable (_IntToFPUnit_io_resp_bits_uop_bypassable),
.io_enq_bits_uop_mem_cmd (_IntToFPUnit_io_resp_bits_uop_mem_cmd),
.io_enq_bits_uop_mem_size (_IntToFPUnit_io_resp_bits_uop_mem_size),
.io_enq_bits_uop_mem_signed (_IntToFPUnit_io_resp_bits_uop_mem_signed),
.io_enq_bits_uop_is_fence (_IntToFPUnit_io_resp_bits_uop_is_fence),
.io_enq_bits_uop_is_fencei (_IntToFPUnit_io_resp_bits_uop_is_fencei),
.io_enq_bits_uop_is_amo (_IntToFPUnit_io_resp_bits_uop_is_amo),
.io_enq_bits_uop_uses_ldq (_IntToFPUnit_io_resp_bits_uop_uses_ldq),
.io_enq_bits_uop_uses_stq (_IntToFPUnit_io_resp_bits_uop_uses_stq),
.io_enq_bits_uop_is_sys_pc2epc (_IntToFPUnit_io_resp_bits_uop_is_sys_pc2epc),
.io_enq_bits_uop_is_unique (_IntToFPUnit_io_resp_bits_uop_is_unique),
.io_enq_bits_uop_flush_on_commit (_IntToFPUnit_io_resp_bits_uop_flush_on_commit),
.io_enq_bits_uop_ldst_is_rs1 (_IntToFPUnit_io_resp_bits_uop_ldst_is_rs1),
.io_enq_bits_uop_ldst (_IntToFPUnit_io_resp_bits_uop_ldst),
.io_enq_bits_uop_lrs1 (_IntToFPUnit_io_resp_bits_uop_lrs1),
.io_enq_bits_uop_lrs2 (_IntToFPUnit_io_resp_bits_uop_lrs2),
.io_enq_bits_uop_lrs3 (_IntToFPUnit_io_resp_bits_uop_lrs3),
.io_enq_bits_uop_ldst_val (_IntToFPUnit_io_resp_bits_uop_ldst_val),
.io_enq_bits_uop_dst_rtype (_IntToFPUnit_io_resp_bits_uop_dst_rtype),
.io_enq_bits_uop_lrs1_rtype (_IntToFPUnit_io_resp_bits_uop_lrs1_rtype),
.io_enq_bits_uop_lrs2_rtype (_IntToFPUnit_io_resp_bits_uop_lrs2_rtype),
.io_enq_bits_uop_frs3_en (_IntToFPUnit_io_resp_bits_uop_frs3_en),
.io_enq_bits_uop_fp_val (_IntToFPUnit_io_resp_bits_uop_fp_val),
.io_enq_bits_uop_fp_single (_IntToFPUnit_io_resp_bits_uop_fp_single),
.io_enq_bits_uop_xcpt_pf_if (_IntToFPUnit_io_resp_bits_uop_xcpt_pf_if),
.io_enq_bits_uop_xcpt_ae_if (_IntToFPUnit_io_resp_bits_uop_xcpt_ae_if),
.io_enq_bits_uop_xcpt_ma_if (_IntToFPUnit_io_resp_bits_uop_xcpt_ma_if),
.io_enq_bits_uop_bp_debug_if (_IntToFPUnit_io_resp_bits_uop_bp_debug_if),
.io_enq_bits_uop_bp_xcpt_if (_IntToFPUnit_io_resp_bits_uop_bp_xcpt_if),
.io_enq_bits_uop_debug_fsrc (_IntToFPUnit_io_resp_bits_uop_debug_fsrc),
.io_enq_bits_uop_debug_tsrc (_IntToFPUnit_io_resp_bits_uop_debug_tsrc),
.io_enq_bits_data (_IntToFPUnit_io_resp_bits_data),
.io_enq_bits_fflags_valid (_IntToFPUnit_io_resp_bits_fflags_valid),
.io_enq_bits_fflags_bits_uop_uopc (_IntToFPUnit_io_resp_bits_fflags_bits_uop_uopc),
.io_enq_bits_fflags_bits_uop_inst (_IntToFPUnit_io_resp_bits_fflags_bits_uop_inst),
.io_enq_bits_fflags_bits_uop_debug_inst (_IntToFPUnit_io_resp_bits_fflags_bits_uop_debug_inst),
.io_enq_bits_fflags_bits_uop_is_rvc (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_rvc),
.io_enq_bits_fflags_bits_uop_debug_pc (_IntToFPUnit_io_resp_bits_fflags_bits_uop_debug_pc),
.io_enq_bits_fflags_bits_uop_iq_type (_IntToFPUnit_io_resp_bits_fflags_bits_uop_iq_type),
.io_enq_bits_fflags_bits_uop_fu_code (_IntToFPUnit_io_resp_bits_fflags_bits_uop_fu_code),
.io_enq_bits_fflags_bits_uop_ctrl_br_type (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_br_type),
.io_enq_bits_fflags_bits_uop_ctrl_op1_sel (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_op1_sel),
.io_enq_bits_fflags_bits_uop_ctrl_op2_sel (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_op2_sel),
.io_enq_bits_fflags_bits_uop_ctrl_imm_sel (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_imm_sel),
.io_enq_bits_fflags_bits_uop_ctrl_op_fcn (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_op_fcn),
.io_enq_bits_fflags_bits_uop_ctrl_fcn_dw (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_fcn_dw),
.io_enq_bits_fflags_bits_uop_ctrl_csr_cmd (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_csr_cmd),
.io_enq_bits_fflags_bits_uop_ctrl_is_load (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_is_load),
.io_enq_bits_fflags_bits_uop_ctrl_is_sta (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_is_sta),
.io_enq_bits_fflags_bits_uop_ctrl_is_std (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_is_std),
.io_enq_bits_fflags_bits_uop_iw_state (_IntToFPUnit_io_resp_bits_fflags_bits_uop_iw_state),
.io_enq_bits_fflags_bits_uop_iw_p1_poisoned (_IntToFPUnit_io_resp_bits_fflags_bits_uop_iw_p1_poisoned),
.io_enq_bits_fflags_bits_uop_iw_p2_poisoned (_IntToFPUnit_io_resp_bits_fflags_bits_uop_iw_p2_poisoned),
.io_enq_bits_fflags_bits_uop_is_br (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_br),
.io_enq_bits_fflags_bits_uop_is_jalr (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_jalr),
.io_enq_bits_fflags_bits_uop_is_jal (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_jal),
.io_enq_bits_fflags_bits_uop_is_sfb (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_sfb),
.io_enq_bits_fflags_bits_uop_br_mask (_IntToFPUnit_io_resp_bits_fflags_bits_uop_br_mask),
.io_enq_bits_fflags_bits_uop_br_tag (_IntToFPUnit_io_resp_bits_fflags_bits_uop_br_tag),
.io_enq_bits_fflags_bits_uop_ftq_idx (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ftq_idx),
.io_enq_bits_fflags_bits_uop_edge_inst (_IntToFPUnit_io_resp_bits_fflags_bits_uop_edge_inst),
.io_enq_bits_fflags_bits_uop_pc_lob (_IntToFPUnit_io_resp_bits_fflags_bits_uop_pc_lob),
.io_enq_bits_fflags_bits_uop_taken (_IntToFPUnit_io_resp_bits_fflags_bits_uop_taken),
.io_enq_bits_fflags_bits_uop_imm_packed (_IntToFPUnit_io_resp_bits_fflags_bits_uop_imm_packed),
.io_enq_bits_fflags_bits_uop_csr_addr (_IntToFPUnit_io_resp_bits_fflags_bits_uop_csr_addr),
.io_enq_bits_fflags_bits_uop_rob_idx (_IntToFPUnit_io_resp_bits_fflags_bits_uop_rob_idx),
.io_enq_bits_fflags_bits_uop_ldq_idx (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ldq_idx),
.io_enq_bits_fflags_bits_uop_stq_idx (_IntToFPUnit_io_resp_bits_fflags_bits_uop_stq_idx),
.io_enq_bits_fflags_bits_uop_rxq_idx (_IntToFPUnit_io_resp_bits_fflags_bits_uop_rxq_idx),
.io_enq_bits_fflags_bits_uop_pdst (_IntToFPUnit_io_resp_bits_fflags_bits_uop_pdst),
.io_enq_bits_fflags_bits_uop_prs1 (_IntToFPUnit_io_resp_bits_fflags_bits_uop_prs1),
.io_enq_bits_fflags_bits_uop_prs2 (_IntToFPUnit_io_resp_bits_fflags_bits_uop_prs2),
.io_enq_bits_fflags_bits_uop_prs3 (_IntToFPUnit_io_resp_bits_fflags_bits_uop_prs3),
.io_enq_bits_fflags_bits_uop_ppred (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ppred),
.io_enq_bits_fflags_bits_uop_prs1_busy (_IntToFPUnit_io_resp_bits_fflags_bits_uop_prs1_busy),
.io_enq_bits_fflags_bits_uop_prs2_busy (_IntToFPUnit_io_resp_bits_fflags_bits_uop_prs2_busy),
.io_enq_bits_fflags_bits_uop_prs3_busy (_IntToFPUnit_io_resp_bits_fflags_bits_uop_prs3_busy),
.io_enq_bits_fflags_bits_uop_ppred_busy (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ppred_busy),
.io_enq_bits_fflags_bits_uop_stale_pdst (_IntToFPUnit_io_resp_bits_fflags_bits_uop_stale_pdst),
.io_enq_bits_fflags_bits_uop_exception (_IntToFPUnit_io_resp_bits_fflags_bits_uop_exception),
.io_enq_bits_fflags_bits_uop_exc_cause (_IntToFPUnit_io_resp_bits_fflags_bits_uop_exc_cause),
.io_enq_bits_fflags_bits_uop_bypassable (_IntToFPUnit_io_resp_bits_fflags_bits_uop_bypassable),
.io_enq_bits_fflags_bits_uop_mem_cmd (_IntToFPUnit_io_resp_bits_fflags_bits_uop_mem_cmd),
.io_enq_bits_fflags_bits_uop_mem_size (_IntToFPUnit_io_resp_bits_fflags_bits_uop_mem_size),
.io_enq_bits_fflags_bits_uop_mem_signed (_IntToFPUnit_io_resp_bits_fflags_bits_uop_mem_signed),
.io_enq_bits_fflags_bits_uop_is_fence (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_fence),
.io_enq_bits_fflags_bits_uop_is_fencei (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_fencei),
.io_enq_bits_fflags_bits_uop_is_amo (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_amo),
.io_enq_bits_fflags_bits_uop_uses_ldq (_IntToFPUnit_io_resp_bits_fflags_bits_uop_uses_ldq),
.io_enq_bits_fflags_bits_uop_uses_stq (_IntToFPUnit_io_resp_bits_fflags_bits_uop_uses_stq),
.io_enq_bits_fflags_bits_uop_is_sys_pc2epc (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_sys_pc2epc),
.io_enq_bits_fflags_bits_uop_is_unique (_IntToFPUnit_io_resp_bits_fflags_bits_uop_is_unique),
.io_enq_bits_fflags_bits_uop_flush_on_commit (_IntToFPUnit_io_resp_bits_fflags_bits_uop_flush_on_commit),
.io_enq_bits_fflags_bits_uop_ldst_is_rs1 (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ldst_is_rs1),
.io_enq_bits_fflags_bits_uop_ldst (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ldst),
.io_enq_bits_fflags_bits_uop_lrs1 (_IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs1),
.io_enq_bits_fflags_bits_uop_lrs2 (_IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs2),
.io_enq_bits_fflags_bits_uop_lrs3 (_IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs3),
.io_enq_bits_fflags_bits_uop_ldst_val (_IntToFPUnit_io_resp_bits_fflags_bits_uop_ldst_val),
.io_enq_bits_fflags_bits_uop_dst_rtype (_IntToFPUnit_io_resp_bits_fflags_bits_uop_dst_rtype),
.io_enq_bits_fflags_bits_uop_lrs1_rtype (_IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs1_rtype),
.io_enq_bits_fflags_bits_uop_lrs2_rtype (_IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs2_rtype),
.io_enq_bits_fflags_bits_uop_frs3_en (_IntToFPUnit_io_resp_bits_fflags_bits_uop_frs3_en),
.io_enq_bits_fflags_bits_uop_fp_val (_IntToFPUnit_io_resp_bits_fflags_bits_uop_fp_val),
.io_enq_bits_fflags_bits_uop_fp_single (_IntToFPUnit_io_resp_bits_fflags_bits_uop_fp_single),
.io_enq_bits_fflags_bits_uop_xcpt_pf_if (_IntToFPUnit_io_resp_bits_fflags_bits_uop_xcpt_pf_if),
.io_enq_bits_fflags_bits_uop_xcpt_ae_if (_IntToFPUnit_io_resp_bits_fflags_bits_uop_xcpt_ae_if),
.io_enq_bits_fflags_bits_uop_xcpt_ma_if (_IntToFPUnit_io_resp_bits_fflags_bits_uop_xcpt_ma_if),
.io_enq_bits_fflags_bits_uop_bp_debug_if (_IntToFPUnit_io_resp_bits_fflags_bits_uop_bp_debug_if),
.io_enq_bits_fflags_bits_uop_bp_xcpt_if (_IntToFPUnit_io_resp_bits_fflags_bits_uop_bp_xcpt_if),
.io_enq_bits_fflags_bits_uop_debug_fsrc (_IntToFPUnit_io_resp_bits_fflags_bits_uop_debug_fsrc),
.io_enq_bits_fflags_bits_uop_debug_tsrc (_IntToFPUnit_io_resp_bits_fflags_bits_uop_debug_tsrc),
.io_enq_bits_fflags_bits_flags (_IntToFPUnit_io_resp_bits_fflags_bits_flags),
.io_deq_ready (io_ll_fresp_ready),
.io_deq_valid (io_ll_fresp_valid),
.io_deq_bits_uop_uopc (io_ll_fresp_bits_uop_uopc),
.io_deq_bits_uop_br_mask (io_ll_fresp_bits_uop_br_mask),
.io_deq_bits_uop_rob_idx (io_ll_fresp_bits_uop_rob_idx),
.io_deq_bits_uop_stq_idx (io_ll_fresp_bits_uop_stq_idx),
.io_deq_bits_uop_pdst (io_ll_fresp_bits_uop_pdst),
.io_deq_bits_uop_is_amo (io_ll_fresp_bits_uop_is_amo),
.io_deq_bits_uop_uses_stq (io_ll_fresp_bits_uop_uses_stq),
.io_deq_bits_uop_dst_rtype (io_ll_fresp_bits_uop_dst_rtype),
.io_deq_bits_uop_fp_val (io_ll_fresp_bits_uop_fp_val),
.io_deq_bits_data (io_ll_fresp_bits_data),
.io_deq_bits_predicated (io_ll_fresp_bits_predicated),
.io_deq_bits_fflags_valid (io_ll_fresp_bits_fflags_valid),
.io_deq_bits_fflags_bits_uop_rob_idx (io_ll_fresp_bits_fflags_bits_uop_rob_idx),
.io_deq_bits_fflags_bits_flags (io_ll_fresp_bits_fflags_bits_flags),
.io_brupdate_b1_resolve_mask (io_brupdate_b1_resolve_mask),
.io_brupdate_b1_mispredict_mask (io_brupdate_b1_mispredict_mask),
.io_flush (io_req_bits_kill),
.io_empty (_queue_io_empty)
);
DivUnit DivUnit (
.clock (clock),
.reset (reset),
.io_req_ready (_DivUnit_io_req_ready),
.io_req_valid (io_req_valid & io_req_bits_uop_fu_code[4]),
.io_req_bits_uop_ctrl_op_fcn (io_req_bits_uop_ctrl_op_fcn),
.io_req_bits_uop_ctrl_fcn_dw (io_req_bits_uop_ctrl_fcn_dw),
.io_req_bits_uop_br_mask (io_req_bits_uop_br_mask),
.io_req_bits_uop_rob_idx (io_req_bits_uop_rob_idx),
.io_req_bits_uop_pdst (io_req_bits_uop_pdst),
.io_req_bits_uop_bypassable (io_req_bits_uop_bypassable),
.io_req_bits_uop_is_amo (io_req_bits_uop_is_amo),
.io_req_bits_uop_uses_stq (io_req_bits_uop_uses_stq),
.io_req_bits_uop_dst_rtype (io_req_bits_uop_dst_rtype),
.io_req_bits_rs1_data (io_req_bits_rs1_data[63:0]),
.io_req_bits_rs2_data (io_req_bits_rs2_data[63:0]),
.io_req_bits_kill (io_req_bits_kill),
.io_resp_ready (~_io_iresp_valid_T),
.io_resp_valid (_DivUnit_io_resp_valid),
.io_resp_bits_uop_rob_idx (_DivUnit_io_resp_bits_uop_rob_idx),
.io_resp_bits_uop_pdst (_DivUnit_io_resp_bits_uop_pdst),
.io_resp_bits_uop_bypassable (_DivUnit_io_resp_bits_uop_bypassable),
.io_resp_bits_uop_is_amo (_DivUnit_io_resp_bits_uop_is_amo),
.io_resp_bits_uop_uses_stq (_DivUnit_io_resp_bits_uop_uses_stq),
.io_resp_bits_uop_dst_rtype (_DivUnit_io_resp_bits_uop_dst_rtype),
.io_resp_bits_data (_DivUnit_io_resp_bits_data),
.io_brupdate_b1_resolve_mask (io_brupdate_b1_resolve_mask),
.io_brupdate_b1_mispredict_mask (io_brupdate_b1_mispredict_mask)
);
assign io_fu_types = {1'h0, _queue_io_empty, 3'h1, ~div_busy, 4'hB};
assign io_iresp_valid = _io_iresp_valid_T | _DivUnit_io_resp_valid;
assign io_iresp_bits_uop_csr_addr = _ALUUnit_io_resp_bits_uop_imm_packed[19:8];
assign io_iresp_bits_uop_rob_idx = _ALUUnit_io_resp_valid ? _ALUUnit_io_resp_bits_uop_rob_idx : _PipelinedMulUnit_io_resp_valid ? _PipelinedMulUnit_io_resp_bits_uop_rob_idx : _DivUnit_io_resp_bits_uop_rob_idx;
assign io_iresp_bits_uop_pdst = _ALUUnit_io_resp_valid ? _ALUUnit_io_resp_bits_uop_pdst : _PipelinedMulUnit_io_resp_valid ? _PipelinedMulUnit_io_resp_bits_uop_pdst : _DivUnit_io_resp_bits_uop_pdst;
assign io_iresp_bits_uop_bypassable = _ALUUnit_io_resp_valid ? _ALUUnit_io_resp_bits_uop_bypassable : _PipelinedMulUnit_io_resp_valid ? _PipelinedMulUnit_io_resp_bits_uop_bypassable : _DivUnit_io_resp_bits_uop_bypassable;
assign io_iresp_bits_uop_is_amo = _ALUUnit_io_resp_valid ? _ALUUnit_io_resp_bits_uop_is_amo : _PipelinedMulUnit_io_resp_valid ? _PipelinedMulUnit_io_resp_bits_uop_is_amo : _DivUnit_io_resp_bits_uop_is_amo;
assign io_iresp_bits_uop_uses_stq = _ALUUnit_io_resp_valid ? _ALUUnit_io_resp_bits_uop_uses_stq : _PipelinedMulUnit_io_resp_valid ? _PipelinedMulUnit_io_resp_bits_uop_uses_stq : _DivUnit_io_resp_bits_uop_uses_stq;
assign io_iresp_bits_uop_dst_rtype = _ALUUnit_io_resp_valid ? _ALUUnit_io_resp_bits_uop_dst_rtype : _PipelinedMulUnit_io_resp_valid ? _PipelinedMulUnit_io_resp_bits_uop_dst_rtype : _DivUnit_io_resp_bits_uop_dst_rtype;
assign io_iresp_bits_data = {1'h0, _ALUUnit_io_resp_valid ? _ALUUnit_io_resp_bits_data : _PipelinedMulUnit_io_resp_valid ? _PipelinedMulUnit_io_resp_bits_data : _DivUnit_io_resp_bits_data};
assign io_bypass_0_bits_data = {1'h0, _ALUUnit_io_bypass_0_bits_data};
assign io_bypass_1_bits_data = {1'h0, _ALUUnit_io_bypass_1_bits_data};
assign io_bypass_2_bits_data = {1'h0, _ALUUnit_io_bypass_2_bits_data};
endmodule |
Generate the Verilog code corresponding to this Chisel code
//******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
package boom.v3.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket.Instructions32
import freechips.rocketchip.rocket.CustomInstructions._
import freechips.rocketchip.rocket.RVCExpander
import freechips.rocketchip.rocket.{CSR,Causes}
import freechips.rocketchip.util.{uintToBitPat,UIntIsOneOf}
import FUConstants._
import boom.v3.common._
import boom.v3.util._
// scalastyle:off
/**
* Abstract trait giving defaults and other relevant values to different Decode constants/
*/
abstract trait DecodeConstants
extends freechips.rocketchip.rocket.constants.ScalarOpConstants
with freechips.rocketchip.rocket.constants.MemoryOpConstants
{
val xpr64 = Y // TODO inform this from xLen
val DC2 = BitPat.dontCare(2) // Makes the listing below more readable
// Row returned for any instruction that matches no table entry: legal = N
// (first column) marks it illegal; most other columns are don't-care (X/DC2).
// The column order here defines the positional contract for every `table`
// below and for CtrlSigs.decode, which zips signals against it by position.
def decode_default: List[BitPat] =
// frs3_en wakeup_delay
// is val inst? | imm sel | bypassable (aka, known/fixed latency)
// | is fp inst? | | uses_ldq | | is_br
// | | is single-prec? rs1 regtype | | | uses_stq | | |
// | | | micro-code | rs2 type| | | | is_amo | | |
// | | | | iq-type func unit | | | | | | | is_fence | | |
// | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall?
// | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
// | | | | | | | | | | | | | | | | | | | | | | | |
List(N, N, X, uopX , IQT_INT, FU_X , RT_X , DC2 ,DC2 ,X, IS_X, X, X, X, X, N, M_X, DC2, X, X, N, N, X, CSR.X)
// Instruction-pattern -> control-signal-row mapping supplied by each
// concrete decode object (X32Decode, X64Decode, XDecode, FDecode, ...).
val table: Array[(BitPat, List[BitPat])]
}
// scalastyle:on
/**
* Decoded control signals
*/
class CtrlSigs extends Bundle
{
val legal = Bool() // instruction matched a decode-table entry (valid, implemented op)
val fp_val = Bool() // floating-point instruction (per the "is fp inst?" table column)
val fp_single = Bool() // single-precision (per the "is single-prec?" table column)
val uopc = UInt(UOPC_SZ.W) // micro-op opcode
val iq_type = UInt(IQT_SZ.W) // issue-queue type (tables use IQT_INT / IQT_MEM / IQT_FP / IQT_MFP)
val fu_code = UInt(FUC_SZ.W) // functional-unit code (FU_ALU, FU_MEM, FU_MUL, ...)
val dst_type = UInt(2.W) // destination register type (RT_FIX / RT_FLT / RT_X / ...)
val rs1_type = UInt(2.W) // source-1 register type
val rs2_type = UInt(2.W) // source-2 register type
val frs3_en = Bool() // reads a third (FP) source register
val imm_sel = UInt(IS_X.getWidth.W) // immediate format select (IS_I / IS_S / IS_B / IS_U / IS_J)
val uses_ldq = Bool() // uses the load queue
val uses_stq = Bool() // uses the store queue
val is_amo = Bool() // atomic memory operation
val is_fence = Bool() // FENCE
val is_fencei = Bool() // FENCE.I
val mem_cmd = UInt(freechips.rocketchip.rocket.M_SZ.W) // memory command (M_XRD, M_XWR, M_XA_*, ...)
val wakeup_delay = UInt(2.W) // fixed-latency wakeup delay (table column "wakeup_delay")
val bypassable = Bool() // result has known/fixed latency and may be bypassed
val is_br = Bool() // conditional branch
val is_sys_pc2epc = Bool() // breakpoint or ecall (per the table column comment)
val inst_unique = Bool() // must execute alone: clear pipeline for it (table "is unique?")
val flush_on_commit = Bool() // flush the pipeline after this instruction commits
val csr_cmd = UInt(freechips.rocketchip.rocket.CSR.SZ.W) // CSR command (CSR.W/S/C/I/N)
val rocc = Bool() // RoCC instruction; not a table column — hard-wired false in decode()
// Decode `inst` against `table` (falling back to XDecode.decode_default for
// unmatched encodings) and drive every control signal of this bundle.
// INVARIANT: the order of `sigs` below must exactly match the column order
// of decode_default / the tables — the zip assigns purely by position.
def decode(inst: UInt, table: Iterable[(BitPat, List[BitPat])]) = {
val decoder = freechips.rocketchip.rocket.DecodeLogic(inst, XDecode.decode_default, table)
val sigs =
Seq(legal, fp_val, fp_single, uopc, iq_type, fu_code, dst_type, rs1_type,
rs2_type, frs3_en, imm_sel, uses_ldq, uses_stq, is_amo,
is_fence, is_fencei, mem_cmd, wakeup_delay, bypassable,
is_br, is_sys_pc2epc, inst_unique, flush_on_commit, csr_cmd)
sigs zip decoder map {case(s,d) => s := d}
rocc := false.B // rocc has no table column; a RoCC-aware decoder must set it separately
this
}
}
// scalastyle:off
/**
* Decode constants for RV32
*/
object X32Decode extends DecodeConstants
{
// frs3_en wakeup_delay
// is val inst? | imm sel | bypassable (aka, known/fixed latency)
// | is fp inst? | | uses_ldq | | is_br
// | | is single-prec? rs1 regtype | | | uses_stq | | |
// | | | micro-code | rs2 type| | | | is_amo | | |
// | | | | iq-type func unit | | | | | | | is_fence | | |
// | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall?
// | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
// RV32-only shift-immediate encodings (from Instructions32); the RV64 forms
// of SLLI/SRLI/SRAI live in X64Decode, which uses a wider shamt field.
val table: Array[(BitPat, List[BitPat])] = Array(// | | | | | | | | | | | | | | | | | |
Instructions32.SLLI ->
List(Y, N, X, uopSLLI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
Instructions32.SRLI ->
List(Y, N, X, uopSRLI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
Instructions32.SRAI ->
List(Y, N, X, uopSRAI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N)
)
}
/**
* Decode constants for RV64
*/
object X64Decode extends DecodeConstants
{
// frs3_en wakeup_delay
// is val inst? | imm sel | bypassable (aka, known/fixed latency)
// | is fp inst? | | uses_ldq | | is_br
// | | is single-prec? rs1 regtype | | | uses_stq | | |
// | | | micro-code | rs2 type| | | | is_amo | | |
// | | | | iq-type func unit | | | | | | | is_fence | | |
// | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall?
// | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
// RV64-only instructions: 64-bit loads/stores, the RV64 shamt-immediate
// shifts, and the *W word-sized ALU operations.
val table: Array[(BitPat, List[BitPat])] = Array(// | | | | | | | | | | | | | | | | | |
LD -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
LWU -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
SD -> List(Y, N, X, uopSTA , IQT_MEM, FU_MEM , RT_X , RT_FIX, RT_FIX, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N),
SLLI -> List(Y, N, X, uopSLLI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SRLI -> List(Y, N, X, uopSRLI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SRAI -> List(Y, N, X, uopSRAI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
ADDIW -> List(Y, N, X, uopADDIW, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SLLIW -> List(Y, N, X, uopSLLIW, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SRAIW -> List(Y, N, X, uopSRAIW, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SRLIW -> List(Y, N, X, uopSRLIW, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
// Register-register word ops: imm_sel is a don't-care here (no immediate is used).
ADDW -> List(Y, N, X, uopADDW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SUBW -> List(Y, N, X, uopSUBW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SLLW -> List(Y, N, X, uopSLLW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SRAW -> List(Y, N, X, uopSRAW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SRLW -> List(Y, N, X, uopSRLW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N)
)
}
/**
* Overall Decode constants
*/
object XDecode extends DecodeConstants
{
// frs3_en wakeup_delay
// is val inst? | imm sel | bypassable (aka, known/fixed latency)
// | is fp inst? | | uses_ldq | | is_br
// | | is single-prec? rs1 regtype | | | uses_stq | | |
// | | | micro-code | rs2 type| | | | is_amo | | |
// | | | | iq-type func unit | | | | | | | is_fence | | |
// | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall?
// | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
// Base integer-instruction decode table (RV32I-common subset plus M-extension
// and A-extension rows), shared by both RV32 and RV64 configurations.
val table: Array[(BitPat, List[BitPat])] = Array(// | | | | | | | | | | | | | | | | | |
// --- Loads (wakeup_delay 3, uses_ldq) and stores (uses_stq) ---
LW -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
LH -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
LHU -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
LB -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
LBU -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
SW -> List(Y, N, X, uopSTA , IQT_MEM, FU_MEM , RT_X , RT_FIX, RT_FIX, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N),
SH -> List(Y, N, X, uopSTA , IQT_MEM, FU_MEM , RT_X , RT_FIX, RT_FIX, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N),
SB -> List(Y, N, X, uopSTA , IQT_MEM, FU_MEM , RT_X , RT_FIX, RT_FIX, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N),
// --- Single-cycle ALU ops (wakeup_delay 1, bypassable) ---
LUI -> List(Y, N, X, uopLUI , IQT_INT, FU_ALU , RT_FIX, RT_X , RT_X , N, IS_U, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
ADDI -> List(Y, N, X, uopADDI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
ANDI -> List(Y, N, X, uopANDI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
ORI -> List(Y, N, X, uopORI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
XORI -> List(Y, N, X, uopXORI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SLTI -> List(Y, N, X, uopSLTI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SLTIU -> List(Y, N, X, uopSLTIU, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
// Register-register ALU ops: imm_sel is a don't-care (no immediate is used).
SLL -> List(Y, N, X, uopSLL , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
ADD -> List(Y, N, X, uopADD , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SUB -> List(Y, N, X, uopSUB , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SLT -> List(Y, N, X, uopSLT , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SLTU -> List(Y, N, X, uopSLTU , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
AND -> List(Y, N, X, uopAND , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
OR -> List(Y, N, X, uopOR , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
XOR -> List(Y, N, X, uopXOR , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SRA -> List(Y, N, X, uopSRA , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SRL -> List(Y, N, X, uopSRL , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
// --- M extension: multiply (FU_MUL) and divide (FU_DIV); variable latency,
// so wakeup_delay 0 and not bypassable ---
MUL -> List(Y, N, X, uopMUL , IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
MULH -> List(Y, N, X, uopMULH , IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
MULHU -> List(Y, N, X, uopMULHU, IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
MULHSU -> List(Y, N, X, uopMULHSU,IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
MULW -> List(Y, N, X, uopMULW , IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
DIV -> List(Y, N, X, uopDIV , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
DIVU -> List(Y, N, X, uopDIVU , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
REM -> List(Y, N, X, uopREM , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
REMU -> List(Y, N, X, uopREMU , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
DIVW -> List(Y, N, X, uopDIVW , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
DIVUW -> List(Y, N, X, uopDIVUW, IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
REMW -> List(Y, N, X, uopREMW , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
REMUW -> List(Y, N, X, uopREMUW, IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
// --- PC-relative / jump instructions (routed to FU_JMP for the PC read) ---
AUIPC -> List(Y, N, X, uopAUIPC, IQT_INT, FU_JMP , RT_FIX, RT_X , RT_X , N, IS_U, N, N, N, N, N, M_X , 1.U, N, N, N, N, N, CSR.N), // use BRU for the PC read
JAL -> List(Y, N, X, uopJAL , IQT_INT, FU_JMP , RT_FIX, RT_X , RT_X , N, IS_J, N, N, N, N, N, M_X , 1.U, N, N, N, N, N, CSR.N),
JALR -> List(Y, N, X, uopJALR , IQT_INT, FU_JMP , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, N, N, N, N, N, CSR.N),
// --- Conditional branches (is_br set, no destination register) ---
BEQ -> List(Y, N, X, uopBEQ , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N),
BNE -> List(Y, N, X, uopBNE , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N),
BGE -> List(Y, N, X, uopBGE , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N),
BGEU -> List(Y, N, X, uopBGEU , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N),
BLT -> List(Y, N, X, uopBLT , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N),
BLTU -> List(Y, N, X, uopBLTU , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N),
// --- CSR accesses (unique + flush-on-commit; csr_cmd selects W/S/C) ---
// I-type, the immediate12 holds the CSR register.
CSRRW -> List(Y, N, X, uopCSRRW, IQT_INT, FU_CSR , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.W),
CSRRS -> List(Y, N, X, uopCSRRS, IQT_INT, FU_CSR , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.S),
CSRRC -> List(Y, N, X, uopCSRRC, IQT_INT, FU_CSR , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.C),
CSRRWI -> List(Y, N, X, uopCSRRWI,IQT_INT, FU_CSR , RT_FIX, RT_PAS, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.W),
CSRRSI -> List(Y, N, X, uopCSRRSI,IQT_INT, FU_CSR , RT_FIX, RT_PAS, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.S),
CSRRCI -> List(Y, N, X, uopCSRRCI,IQT_INT, FU_CSR , RT_FIX, RT_PAS, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.C),
// --- System / privileged / fence instructions (serialize: unique + flush) ---
SFENCE_VMA->List(Y,N, X, uopSFENCE,IQT_MEM, FU_MEM , RT_X , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N,M_SFENCE,0.U,N, N, N, Y, Y, CSR.N),
ECALL -> List(Y, N, X, uopERET ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, Y, Y, Y, CSR.I),
EBREAK -> List(Y, N, X, uopERET ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, Y, Y, Y, CSR.I),
SRET -> List(Y, N, X, uopERET ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.I),
MRET -> List(Y, N, X, uopERET ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.I),
DRET -> List(Y, N, X, uopERET ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.I),
WFI -> List(Y, N, X, uopWFI ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.I),
FENCE_I -> List(Y, N, X, uopNOP , IQT_INT, FU_X , RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, Y, M_X , 0.U, N, N, N, Y, Y, CSR.N),
FENCE -> List(Y, N, X, uopFENCE, IQT_INT, FU_MEM , RT_X , RT_X , RT_X , N, IS_X, N, Y, N, Y, N, M_X , 0.U, N, N, N, Y, Y, CSR.N), // TODO PERF make fence higher performance
// currently serializes pipeline
// frs3_en wakeup_delay
// is val inst? | imm sel | bypassable (aka, known/fixed latency)
// | is fp inst? | | uses_ldq | | is_br
// | | is single-prec? rs1 regtype | | | uses_stq | | |
// | | | micro-code | rs2 type| | | | is_amo | | |
// | | | | iq-type func unit | | | | | | | is_fence | | |
// | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall?
// | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
// A-type | | | | | | | | | | | | | | | | | | | | | | | |
// --- A extension: AMOs, LR, SC (all serialize: unique + flush-on-commit) ---
AMOADD_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_ADD, 0.U,N, N, N, Y, Y, CSR.N), // TODO make AMOs higherperformance
AMOXOR_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_XOR, 0.U,N, N, N, Y, Y, CSR.N),
AMOSWAP_W->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_SWAP,0.U,N, N, N, Y, Y, CSR.N),
AMOAND_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_AND, 0.U,N, N, N, Y, Y, CSR.N),
AMOOR_W -> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_OR, 0.U,N, N, N, Y, Y, CSR.N),
AMOMIN_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MIN, 0.U,N, N, N, Y, Y, CSR.N),
AMOMINU_W->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MINU,0.U,N, N, N, Y, Y, CSR.N),
AMOMAX_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MAX, 0.U,N, N, N, Y, Y, CSR.N),
AMOMAXU_W->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MAXU,0.U,N, N, N, Y, Y, CSR.N),
AMOADD_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_ADD, 0.U,N, N, N, Y, Y, CSR.N),
AMOXOR_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_XOR, 0.U,N, N, N, Y, Y, CSR.N),
AMOSWAP_D->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_SWAP,0.U,N, N, N, Y, Y, CSR.N),
AMOAND_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_AND, 0.U,N, N, N, Y, Y, CSR.N),
AMOOR_D -> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_OR, 0.U,N, N, N, Y, Y, CSR.N),
AMOMIN_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MIN, 0.U,N, N, N, Y, Y, CSR.N),
AMOMINU_D->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MINU,0.U,N, N, N, Y, Y, CSR.N),
AMOMAX_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MAX, 0.U,N, N, N, Y, Y, CSR.N),
AMOMAXU_D->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MAXU,0.U,N, N, N, Y, Y, CSR.N),
LR_W -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_X , N, IS_X, Y, N, N, N, N, M_XLR , 0.U,N, N, N, Y, Y, CSR.N),
LR_D -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_X , N, IS_X, Y, N, N, N, N, M_XLR , 0.U,N, N, N, Y, Y, CSR.N),
SC_W -> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XSC , 0.U,N, N, N, Y, Y, CSR.N),
SC_D -> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XSC , 0.U,N, N, N, Y, Y, CSR.N)
)
}
/**
* FP Decode constants
*/
object FDecode extends DecodeConstants
{
val table: Array[(BitPat, List[BitPat])] = Array(
// frs3_en wakeup_delay
// | imm sel | bypassable (aka, known/fixed latency)
// | | uses_ldq | | is_br
// is val inst? rs1 regtype | | | uses_stq | | |
// | is fp inst? | rs2 type| | | | is_amo | | |
// | | is dst single-prec? | | | | | | | is_fence | | |
// | | | micro-opcode | | | | | | | | is_fencei | | | is breakpoint or ecall
// | | | | iq_type func dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | unit regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
FLW -> List(Y, Y, Y, uopLD , IQT_MEM, FU_MEM, RT_FLT, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 0.U, N, N, N, N, N, CSR.N),
FLD -> List(Y, Y, N, uopLD , IQT_MEM, FU_MEM, RT_FLT, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 0.U, N, N, N, N, N, CSR.N),
FSW -> List(Y, Y, Y, uopSTA , IQT_MFP,FU_F2IMEM,RT_X , RT_FIX, RT_FLT, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N), // sort of a lie; broken into two micro-ops
FSD -> List(Y, Y, N, uopSTA , IQT_MFP,FU_F2IMEM,RT_X , RT_FIX, RT_FLT, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N),
FCLASS_S-> List(Y, Y, Y, uopFCLASS_S,IQT_FP , FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCLASS_D-> List(Y, Y, N, uopFCLASS_D,IQT_FP , FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMV_W_X -> List(Y, Y, Y, uopFMV_W_X, IQT_INT, FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMV_D_X -> List(Y, Y, N, uopFMV_D_X, IQT_INT, FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMV_X_W -> List(Y, Y, Y, uopFMV_X_W, IQT_FP , FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMV_X_D -> List(Y, Y, N, uopFMV_X_D, IQT_FP , FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSGNJ_S -> List(Y, Y, Y, uopFSGNJ_S, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSGNJ_D -> List(Y, Y, N, uopFSGNJ_D, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSGNJX_S-> List(Y, Y, Y, uopFSGNJ_S, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSGNJX_D-> List(Y, Y, N, uopFSGNJ_D, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSGNJN_S-> List(Y, Y, Y, uopFSGNJ_S, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSGNJN_D-> List(Y, Y, N, uopFSGNJ_D, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
// FP to FP
FCVT_S_D-> List(Y, Y, Y, uopFCVT_S_D,IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_D_S-> List(Y, Y, N, uopFCVT_D_S,IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
// Int to FP
FCVT_S_W-> List(Y, Y, Y, uopFCVT_S_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_S_WU->List(Y, Y, Y, uopFCVT_S_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_S_L-> List(Y, Y, Y, uopFCVT_S_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_S_LU->List(Y, Y, Y, uopFCVT_S_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_D_W-> List(Y, Y, N, uopFCVT_D_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_D_WU->List(Y, Y, N, uopFCVT_D_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_D_L-> List(Y, Y, N, uopFCVT_D_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_D_LU->List(Y, Y, N, uopFCVT_D_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
// FP to Int
FCVT_W_S-> List(Y, Y, Y, uopFCVT_X_S, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_WU_S->List(Y, Y, Y, uopFCVT_X_S, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_L_S-> List(Y, Y, Y, uopFCVT_X_S, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_LU_S->List(Y, Y, Y, uopFCVT_X_S, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_W_D-> List(Y, Y, N, uopFCVT_X_D, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_WU_D->List(Y, Y, N, uopFCVT_X_D, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_L_D-> List(Y, Y, N, uopFCVT_X_D, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_LU_D->List(Y, Y, N, uopFCVT_X_D, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
// "fp_single" is used for wb_data formatting (and debugging)
FEQ_S ->List(Y, Y, Y, uopCMPR_S , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FLT_S ->List(Y, Y, Y, uopCMPR_S , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FLE_S ->List(Y, Y, Y, uopCMPR_S , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FEQ_D ->List(Y, Y, N, uopCMPR_D , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FLT_D ->List(Y, Y, N, uopCMPR_D , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FLE_D ->List(Y, Y, N, uopCMPR_D , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMIN_S ->List(Y, Y, Y,uopFMINMAX_S,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMAX_S ->List(Y, Y, Y,uopFMINMAX_S,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMIN_D ->List(Y, Y, N,uopFMINMAX_D,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMAX_D ->List(Y, Y, N,uopFMINMAX_D,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FADD_S ->List(Y, Y, Y, uopFADD_S , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSUB_S ->List(Y, Y, Y, uopFSUB_S , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMUL_S ->List(Y, Y, Y, uopFMUL_S , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FADD_D ->List(Y, Y, N, uopFADD_D , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSUB_D ->List(Y, Y, N, uopFSUB_D , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMUL_D ->List(Y, Y, N, uopFMUL_D , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMADD_S ->List(Y, Y, Y, uopFMADD_S, IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMSUB_S ->List(Y, Y, Y, uopFMSUB_S, IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FNMADD_S ->List(Y, Y, Y, uopFNMADD_S,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FNMSUB_S ->List(Y, Y, Y, uopFNMSUB_S,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMADD_D ->List(Y, Y, N, uopFMADD_D, IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMSUB_D ->List(Y, Y, N, uopFMSUB_D, IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FNMADD_D ->List(Y, Y, N, uopFNMADD_D,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FNMSUB_D ->List(Y, Y, N, uopFNMSUB_D,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N)
)
}
/**
* FP Divide SquareRoot Constants
*/
object FDivSqrtDecode extends DecodeConstants
{
val table: Array[(BitPat, List[BitPat])] = Array(
// frs3_en wakeup_delay
// | imm sel | bypassable (aka, known/fixed latency)
// | | uses_ldq | | is_br
// is val inst? rs1 regtype | | | uses_stq | | |
// | is fp inst? | rs2 type| | | | is_amo | | |
// | | is dst single-prec? | | | | | | | is_fence | | |
// | | | micro-opcode | | | | | | | | is_fencei | | | is breakpoint or ecall
// | | | | iq-type func dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | unit regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
FDIV_S ->List(Y, Y, Y, uopFDIV_S , IQT_FP, FU_FDV, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FDIV_D ->List(Y, Y, N, uopFDIV_D , IQT_FP, FU_FDV, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSQRT_S ->List(Y, Y, Y, uopFSQRT_S, IQT_FP, FU_FDV, RT_FLT, RT_FLT, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSQRT_D ->List(Y, Y, N, uopFSQRT_D, IQT_FP, FU_FDV, RT_FLT, RT_FLT, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N)
)
}
//scalastyle:on
/**
* RoCC initial decode
*/
object RoCCDecode extends DecodeConstants
{
// Note: We use FU_CSR since CSR instructions cannot co-execute with RoCC instructions
// frs3_en wakeup_delay
// is val inst? | imm sel | bypassable (aka, known/fixed latency)
// | is fp inst? | | uses_ldq | | is_br
// | | is single-prec rs1 regtype | | | uses_stq | | |
// | | | | rs2 type| | | | is_amo | | |
// | | | micro-code func unit | | | | | | | is_fence | | |
// | | | | iq-type | | | | | | | | | is_fencei | | | is breakpoint or ecall?
// | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
// | | | | | | | | | | | | | | | | | | | | | | | |
val table: Array[(BitPat, List[BitPat])] = Array(// | | | | | | | | | | | | | | | | | | |
CUSTOM0 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM0_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM0_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM0_RD ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM0_RD_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM0_RD_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM1_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM1_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM1_RD ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM1_RD_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM1_RD_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM2_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM2_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM2_RD ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM2_RD_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM2_RD_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM3 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM3_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM3_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM3_RD ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM3_RD_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM3_RD_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N)
)
}
/**
* IO bundle for the Decode unit
*/
class DecodeUnitIo(implicit p: Parameters) extends BoomBundle
{
val enq = new Bundle { val uop = Input(new MicroOp()) }
val deq = new Bundle { val uop = Output(new MicroOp()) }
// from CSRFile
val status = Input(new freechips.rocketchip.rocket.MStatus())
val csr_decode = Flipped(new freechips.rocketchip.rocket.CSRDecodeIO)
val interrupt = Input(Bool())
val interrupt_cause = Input(UInt(xLen.W))
}
/**
* Decode unit that takes in a single instruction and generates a MicroOp.
*/
class DecodeUnit(implicit p: Parameters) extends BoomModule
with freechips.rocketchip.rocket.constants.MemoryOpConstants
{
val io = IO(new DecodeUnitIo)
val uop = Wire(new MicroOp())
uop := io.enq.uop
var decode_table = XDecode.table
if (usingFPU) decode_table ++= FDecode.table
if (usingFPU && usingFDivSqrt) decode_table ++= FDivSqrtDecode.table
if (usingRoCC) decode_table ++= RoCCDecode.table
decode_table ++= (if (xLen == 64) X64Decode.table else X32Decode.table)
val inst = uop.inst
val cs = Wire(new CtrlSigs()).decode(inst, decode_table)
// Exception Handling
io.csr_decode.inst := inst
val csr_en = cs.csr_cmd.isOneOf(CSR.S, CSR.C, CSR.W)
val csr_ren = cs.csr_cmd.isOneOf(CSR.S, CSR.C) && uop.lrs1 === 0.U
val system_insn = cs.csr_cmd === CSR.I
val sfence = cs.uopc === uopSFENCE
val cs_legal = cs.legal
// dontTouch(cs_legal)
val id_illegal_insn = !cs_legal ||
cs.fp_val && io.csr_decode.fp_illegal || // TODO check for illegal rm mode: (io.fpu.illegal_rm)
cs.rocc && io.csr_decode.rocc_illegal ||
cs.is_amo && !io.status.isa('a'-'a') ||
(cs.fp_val && !cs.fp_single) && !io.status.isa('d'-'a') ||
csr_en && (io.csr_decode.read_illegal || !csr_ren && io.csr_decode.write_illegal) ||
((sfence || system_insn) && io.csr_decode.system_illegal)
// cs.div && !csr.io.status.isa('m'-'a') || TODO check for illegal div instructions
def checkExceptions(x: Seq[(Bool, UInt)]) =
(x.map(_._1).reduce(_||_), PriorityMux(x))
val (xcpt_valid, xcpt_cause) = checkExceptions(List(
(io.interrupt && !io.enq.uop.is_sfb, io.interrupt_cause), // Disallow interrupts while we are handling a SFB
(uop.bp_debug_if, (CSR.debugTriggerCause).U),
(uop.bp_xcpt_if, (Causes.breakpoint).U),
(uop.xcpt_pf_if, (Causes.fetch_page_fault).U),
(uop.xcpt_ae_if, (Causes.fetch_access).U),
(id_illegal_insn, (Causes.illegal_instruction).U)))
uop.exception := xcpt_valid
uop.exc_cause := xcpt_cause
//-------------------------------------------------------------
uop.uopc := cs.uopc
uop.iq_type := cs.iq_type
uop.fu_code := cs.fu_code
// x-registers placed in 0-31, f-registers placed in 32-63.
// This allows us to straight-up compare register specifiers and not need to
// verify the rtypes (e.g., bypassing in rename).
uop.ldst := inst(RD_MSB,RD_LSB)
uop.lrs1 := inst(RS1_MSB,RS1_LSB)
uop.lrs2 := inst(RS2_MSB,RS2_LSB)
uop.lrs3 := inst(RS3_MSB,RS3_LSB)
uop.ldst_val := cs.dst_type =/= RT_X && !(uop.ldst === 0.U && uop.dst_rtype === RT_FIX)
uop.dst_rtype := cs.dst_type
uop.lrs1_rtype := cs.rs1_type
uop.lrs2_rtype := cs.rs2_type
uop.frs3_en := cs.frs3_en
uop.ldst_is_rs1 := uop.is_sfb_shadow
// SFB optimization
when (uop.is_sfb_shadow && cs.rs2_type === RT_X) {
uop.lrs2_rtype := RT_FIX
uop.lrs2 := inst(RD_MSB,RD_LSB)
uop.ldst_is_rs1 := false.B
} .elsewhen (uop.is_sfb_shadow && cs.uopc === uopADD && inst(RS1_MSB,RS1_LSB) === 0.U) {
uop.uopc := uopMOV
uop.lrs1 := inst(RD_MSB, RD_LSB)
uop.ldst_is_rs1 := true.B
}
when (uop.is_sfb_br) {
uop.fu_code := FU_JMP
}
uop.fp_val := cs.fp_val
uop.fp_single := cs.fp_single // TODO use this signal instead of the FPU decode's table signal?
uop.mem_cmd := cs.mem_cmd
uop.mem_size := Mux(cs.mem_cmd.isOneOf(M_SFENCE, M_FLUSH_ALL), Cat(uop.lrs2 =/= 0.U, uop.lrs1 =/= 0.U), inst(13,12))
uop.mem_signed := !inst(14)
uop.uses_ldq := cs.uses_ldq
uop.uses_stq := cs.uses_stq
uop.is_amo := cs.is_amo
uop.is_fence := cs.is_fence
uop.is_fencei := cs.is_fencei
uop.is_sys_pc2epc := cs.is_sys_pc2epc
uop.is_unique := cs.inst_unique
uop.flush_on_commit := cs.flush_on_commit || (csr_en && !csr_ren && io.csr_decode.write_flush)
uop.bypassable := cs.bypassable
//-------------------------------------------------------------
// immediates
// repackage the immediate, and then pass the fewest number of bits around
val di24_20 = Mux(cs.imm_sel === IS_B || cs.imm_sel === IS_S, inst(11,7), inst(24,20))
uop.imm_packed := Cat(inst(31,25), di24_20, inst(19,12))
//-------------------------------------------------------------
uop.is_br := cs.is_br
uop.is_jal := (uop.uopc === uopJAL)
uop.is_jalr := (uop.uopc === uopJALR)
// uop.is_jump := cs.is_jal || (uop.uopc === uopJALR)
// uop.is_ret := (uop.uopc === uopJALR) &&
// (uop.ldst === X0) &&
// (uop.lrs1 === RA)
// uop.is_call := (uop.uopc === uopJALR || uop.uopc === uopJAL) &&
// (uop.ldst === RA)
//-------------------------------------------------------------
io.deq.uop := uop
}
/**
* Smaller Decode unit for the Frontend to decode different
* branches.
* Accepts EXPANDED RVC instructions
*/
class BranchDecodeSignals(implicit p: Parameters) extends BoomBundle
{
val is_ret = Bool()
val is_call = Bool()
val target = UInt(vaddrBitsExtended.W)
val cfi_type = UInt(CFI_SZ.W)
// Is this branch a short forwards jump?
val sfb_offset = Valid(UInt(log2Ceil(icBlockBytes).W))
// Is this instruction allowed to be inside a sfb?
val shadowable = Bool()
}
class BranchDecode(implicit p: Parameters) extends BoomModule
{
val io = IO(new Bundle {
val inst = Input(UInt(32.W))
val pc = Input(UInt(vaddrBitsExtended.W))
val out = Output(new BranchDecodeSignals)
})
val bpd_csignals =
freechips.rocketchip.rocket.DecodeLogic(io.inst,
List[BitPat](N, N, N, N, X),
//// is br?
//// | is jal?
//// | | is jalr?
//// | | |
//// | | | shadowable
//// | | | | has_rs2
//// | | | | |
Array[(BitPat, List[BitPat])](
JAL -> List(N, Y, N, N, X),
JALR -> List(N, N, Y, N, X),
BEQ -> List(Y, N, N, N, X),
BNE -> List(Y, N, N, N, X),
BGE -> List(Y, N, N, N, X),
BGEU -> List(Y, N, N, N, X),
BLT -> List(Y, N, N, N, X),
BLTU -> List(Y, N, N, N, X),
SLLI -> List(N, N, N, Y, N),
SRLI -> List(N, N, N, Y, N),
SRAI -> List(N, N, N, Y, N),
ADDIW -> List(N, N, N, Y, N),
SLLIW -> List(N, N, N, Y, N),
SRAIW -> List(N, N, N, Y, N),
SRLIW -> List(N, N, N, Y, N),
ADDW -> List(N, N, N, Y, Y),
SUBW -> List(N, N, N, Y, Y),
SLLW -> List(N, N, N, Y, Y),
SRAW -> List(N, N, N, Y, Y),
SRLW -> List(N, N, N, Y, Y),
LUI -> List(N, N, N, Y, N),
ADDI -> List(N, N, N, Y, N),
ANDI -> List(N, N, N, Y, N),
ORI -> List(N, N, N, Y, N),
XORI -> List(N, N, N, Y, N),
SLTI -> List(N, N, N, Y, N),
SLTIU -> List(N, N, N, Y, N),
SLL -> List(N, N, N, Y, Y),
ADD -> List(N, N, N, Y, Y),
SUB -> List(N, N, N, Y, Y),
SLT -> List(N, N, N, Y, Y),
SLTU -> List(N, N, N, Y, Y),
AND -> List(N, N, N, Y, Y),
OR -> List(N, N, N, Y, Y),
XOR -> List(N, N, N, Y, Y),
SRA -> List(N, N, N, Y, Y),
SRL -> List(N, N, N, Y, Y)
))
val cs_is_br = bpd_csignals(0)(0)
val cs_is_jal = bpd_csignals(1)(0)
val cs_is_jalr = bpd_csignals(2)(0)
val cs_is_shadowable = bpd_csignals(3)(0)
val cs_has_rs2 = bpd_csignals(4)(0)
io.out.is_call := (cs_is_jal || cs_is_jalr) && GetRd(io.inst) === RA
io.out.is_ret := cs_is_jalr && GetRs1(io.inst) === BitPat("b00?01") && GetRd(io.inst) === X0
io.out.target := Mux(cs_is_br, ComputeBranchTarget(io.pc, io.inst, xLen),
ComputeJALTarget(io.pc, io.inst, xLen))
io.out.cfi_type :=
Mux(cs_is_jalr,
CFI_JALR,
Mux(cs_is_jal,
CFI_JAL,
Mux(cs_is_br,
CFI_BR,
CFI_X)))
val br_offset = Cat(io.inst(7), io.inst(30,25), io.inst(11,8), 0.U(1.W))
// Is a sfb if it points forwards (offset is positive)
io.out.sfb_offset.valid := cs_is_br && !io.inst(31) && br_offset =/= 0.U && (br_offset >> log2Ceil(icBlockBytes)) === 0.U
io.out.sfb_offset.bits := br_offset
io.out.shadowable := cs_is_shadowable && (
!cs_has_rs2 ||
(GetRs1(io.inst) === GetRd(io.inst)) ||
(io.inst === ADD && GetRs1(io.inst) === X0)
)
}
/**
* Track the current "branch mask", and give out the branch mask to each micro-op in Decode
* (each micro-op in the machine has a branch mask which says which branches it
* is being speculated under).
*
* @param pl_width pipeline width for the processor
*/
class BranchMaskGenerationLogic(val pl_width: Int)(implicit p: Parameters) extends BoomModule
{
val io = IO(new Bundle {
// guess if the uop is a branch (we'll catch this later)
val is_branch = Input(Vec(pl_width, Bool()))
// lock in that it's actually a branch and will fire, so we update
// the branch_masks.
val will_fire = Input(Vec(pl_width, Bool()))
// give out tag immediately (needed in rename)
// mask can come later in the cycle
val br_tag = Output(Vec(pl_width, UInt(brTagSz.W)))
val br_mask = Output(Vec(pl_width, UInt(maxBrCount.W)))
// tell decoders the branch mask has filled up, but on the granularity
// of an individual micro-op (so some micro-ops can go through)
val is_full = Output(Vec(pl_width, Bool()))
val brupdate = Input(new BrUpdateInfo())
val flush_pipeline = Input(Bool())
val debug_branch_mask = Output(UInt(maxBrCount.W))
})
val branch_mask = RegInit(0.U(maxBrCount.W))
//-------------------------------------------------------------
// Give out the branch tag to each branch micro-op
var allocate_mask = branch_mask
val tag_masks = Wire(Vec(pl_width, UInt(maxBrCount.W)))
for (w <- 0 until pl_width) {
// TODO this is a loss of performance as we're blocking branches based on potentially fake branches
io.is_full(w) := (allocate_mask === ~(0.U(maxBrCount.W))) && io.is_branch(w)
// find br_tag and compute next br_mask
val new_br_tag = Wire(UInt(brTagSz.W))
new_br_tag := 0.U
tag_masks(w) := 0.U
for (i <- maxBrCount-1 to 0 by -1) {
when (~allocate_mask(i)) {
new_br_tag := i.U
tag_masks(w) := (1.U << i.U)
}
}
io.br_tag(w) := new_br_tag
allocate_mask = Mux(io.is_branch(w), tag_masks(w) | allocate_mask, allocate_mask)
}
//-------------------------------------------------------------
// Give out the branch mask to each micro-op
// (kill off the bits that corresponded to branches that aren't going to fire)
var curr_mask = branch_mask
for (w <- 0 until pl_width) {
io.br_mask(w) := GetNewBrMask(io.brupdate, curr_mask)
curr_mask = Mux(io.will_fire(w), tag_masks(w) | curr_mask, curr_mask)
}
//-------------------------------------------------------------
// Update the current branch_mask
when (io.flush_pipeline) {
branch_mask := 0.U
} .otherwise {
val mask = Mux(io.brupdate.b2.mispredict,
io.brupdate.b2.uop.br_mask,
~(0.U(maxBrCount.W)))
branch_mask := GetNewBrMask(io.brupdate, curr_mask) & mask
}
io.debug_branch_mask := branch_mask
} | module BranchDecode(
input [31:0] io_inst,
input [39:0] io_pc,
output io_out_is_ret,
output io_out_is_call,
output [39:0] io_out_target,
output [2:0] io_out_cfi_type,
output io_out_sfb_offset_valid,
output [5:0] io_out_sfb_offset_bits,
output io_out_shadowable
);
wire [29:0] bpd_csignals_decoded_invInputs = ~(io_inst[31:2]);
wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_2 = {io_inst[0], io_inst[1], bpd_csignals_decoded_invInputs[0], io_inst[4], io_inst[5], bpd_csignals_decoded_invInputs[4], bpd_csignals_decoded_invInputs[10], bpd_csignals_decoded_invInputs[11], bpd_csignals_decoded_invInputs[12], bpd_csignals_decoded_invInputs[23], bpd_csignals_decoded_invInputs[24], bpd_csignals_decoded_invInputs[25], bpd_csignals_decoded_invInputs[26], bpd_csignals_decoded_invInputs[27], bpd_csignals_decoded_invInputs[29]};
wire [13:0] _bpd_csignals_decoded_andMatrixOutputs_T_3 = {io_inst[0], io_inst[1], bpd_csignals_decoded_invInputs[0], bpd_csignals_decoded_invInputs[1], io_inst[4], io_inst[5], bpd_csignals_decoded_invInputs[4], bpd_csignals_decoded_invInputs[23], bpd_csignals_decoded_invInputs[24], bpd_csignals_decoded_invInputs[25], bpd_csignals_decoded_invInputs[26], bpd_csignals_decoded_invInputs[27], bpd_csignals_decoded_invInputs[28], bpd_csignals_decoded_invInputs[29]};
wire [9:0] _bpd_csignals_decoded_andMatrixOutputs_T_7 = {io_inst[0], io_inst[1], io_inst[2], bpd_csignals_decoded_invInputs[1], bpd_csignals_decoded_invInputs[2], io_inst[5], io_inst[6], bpd_csignals_decoded_invInputs[10], bpd_csignals_decoded_invInputs[11], bpd_csignals_decoded_invInputs[12]};
wire [6:0] _bpd_csignals_decoded_andMatrixOutputs_T_8 = {io_inst[0], io_inst[1], io_inst[2], io_inst[3], bpd_csignals_decoded_invInputs[2], io_inst[5], io_inst[6]};
wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_15 = {io_inst[0], io_inst[1], bpd_csignals_decoded_invInputs[0], io_inst[4], io_inst[5], bpd_csignals_decoded_invInputs[4], io_inst[12], bpd_csignals_decoded_invInputs[11], io_inst[14], bpd_csignals_decoded_invInputs[23], bpd_csignals_decoded_invInputs[24], bpd_csignals_decoded_invInputs[25], bpd_csignals_decoded_invInputs[26], bpd_csignals_decoded_invInputs[27], bpd_csignals_decoded_invInputs[29]};
wire [1:0] _bpd_csignals_decoded_orMatrixOutputs_T_6 = {&{io_inst[0], io_inst[1], bpd_csignals_decoded_invInputs[0], bpd_csignals_decoded_invInputs[1], bpd_csignals_decoded_invInputs[2], io_inst[5], io_inst[6], bpd_csignals_decoded_invInputs[11]}, &{io_inst[0], io_inst[1], bpd_csignals_decoded_invInputs[0], bpd_csignals_decoded_invInputs[1], bpd_csignals_decoded_invInputs[2], io_inst[5], io_inst[6], io_inst[14]}};
assign io_out_is_ret = (&_bpd_csignals_decoded_andMatrixOutputs_T_7) & {io_inst[19:18], io_inst[16:15]} == 4'h1 & io_inst[11:7] == 5'h0;
assign io_out_is_call = ((&_bpd_csignals_decoded_andMatrixOutputs_T_8) | (&_bpd_csignals_decoded_andMatrixOutputs_T_7)) & io_inst[11:7] == 5'h1;
assign io_out_target = (|_bpd_csignals_decoded_orMatrixOutputs_T_6) ? io_pc + {{28{io_inst[31]}}, io_inst[7], io_inst[30:25], io_inst[11:8], 1'h0} & 40'hFFFFFFFFFE : io_pc + {{20{io_inst[31]}}, io_inst[19:12], io_inst[20], io_inst[30:21], 1'h0} & 40'hFFFFFFFFFE;
assign io_out_cfi_type = (&_bpd_csignals_decoded_andMatrixOutputs_T_7) ? 3'h3 : (&_bpd_csignals_decoded_andMatrixOutputs_T_8) ? 3'h2 : {2'h0, |_bpd_csignals_decoded_orMatrixOutputs_T_6};
assign io_out_sfb_offset_valid = (|_bpd_csignals_decoded_orMatrixOutputs_T_6) & ~(io_inst[31]) & (|{io_inst[7], io_inst[30:25], io_inst[11:8]}) & {io_inst[7], io_inst[30:26]} == 6'h0;
assign io_out_sfb_offset_bits = {io_inst[25], io_inst[11:8], 1'h0};
assign io_out_shadowable =
(|{&{io_inst[0], io_inst[1], bpd_csignals_decoded_invInputs[0], bpd_csignals_decoded_invInputs[1], io_inst[4], bpd_csignals_decoded_invInputs[3], bpd_csignals_decoded_invInputs[4], bpd_csignals_decoded_invInputs[10]},
&{io_inst[0], io_inst[1], bpd_csignals_decoded_invInputs[0], io_inst[4], bpd_csignals_decoded_invInputs[3], bpd_csignals_decoded_invInputs[4], bpd_csignals_decoded_invInputs[10], bpd_csignals_decoded_invInputs[11], bpd_csignals_decoded_invInputs[12]},
&_bpd_csignals_decoded_andMatrixOutputs_T_2,
&_bpd_csignals_decoded_andMatrixOutputs_T_3,
&{io_inst[0], io_inst[1], io_inst[2], bpd_csignals_decoded_invInputs[1], io_inst[4], io_inst[5], bpd_csignals_decoded_invInputs[4]},
&{io_inst[0], io_inst[1], bpd_csignals_decoded_invInputs[0], bpd_csignals_decoded_invInputs[1], io_inst[4], bpd_csignals_decoded_invInputs[3], bpd_csignals_decoded_invInputs[4], io_inst[12], bpd_csignals_decoded_invInputs[11], bpd_csignals_decoded_invInputs[24], bpd_csignals_decoded_invInputs[25], bpd_csignals_decoded_invInputs[26], bpd_csignals_decoded_invInputs[27], bpd_csignals_decoded_invInputs[28], bpd_csignals_decoded_invInputs[29]},
&{io_inst[0], io_inst[1], bpd_csignals_decoded_invInputs[0], io_inst[3], io_inst[4], bpd_csignals_decoded_invInputs[4], io_inst[12], bpd_csignals_decoded_invInputs[11], bpd_csignals_decoded_invInputs[23], bpd_csignals_decoded_invInputs[24], bpd_csignals_decoded_invInputs[25], bpd_csignals_decoded_invInputs[26], bpd_csignals_decoded_invInputs[27], bpd_csignals_decoded_invInputs[28], bpd_csignals_decoded_invInputs[29]},
&{io_inst[0], io_inst[1], bpd_csignals_decoded_invInputs[0], bpd_csignals_decoded_invInputs[1], io_inst[4], bpd_csignals_decoded_invInputs[3], bpd_csignals_decoded_invInputs[4], io_inst[13]},
&{io_inst[0], io_inst[1], bpd_csignals_decoded_invInputs[0], bpd_csignals_decoded_invInputs[1], io_inst[4], bpd_csignals_decoded_invInputs[3], bpd_csignals_decoded_invInputs[4], io_inst[12], bpd_csignals_decoded_invInputs[11], io_inst[14], bpd_csignals_decoded_invInputs[24], bpd_csignals_decoded_invInputs[25], bpd_csignals_decoded_invInputs[26], bpd_csignals_decoded_invInputs[27], bpd_csignals_decoded_invInputs[29]},
&{io_inst[0], io_inst[1], bpd_csignals_decoded_invInputs[0], io_inst[3], io_inst[4], bpd_csignals_decoded_invInputs[4], io_inst[12], bpd_csignals_decoded_invInputs[11], io_inst[14], bpd_csignals_decoded_invInputs[23], bpd_csignals_decoded_invInputs[24], bpd_csignals_decoded_invInputs[25], bpd_csignals_decoded_invInputs[26], bpd_csignals_decoded_invInputs[27], bpd_csignals_decoded_invInputs[29]},
&_bpd_csignals_decoded_andMatrixOutputs_T_15}) & ({&_bpd_csignals_decoded_andMatrixOutputs_T_2, &_bpd_csignals_decoded_andMatrixOutputs_T_3, &{io_inst[0], io_inst[1], bpd_csignals_decoded_invInputs[0], io_inst[4], io_inst[5], bpd_csignals_decoded_invInputs[4], bpd_csignals_decoded_invInputs[11], bpd_csignals_decoded_invInputs[12], bpd_csignals_decoded_invInputs[23], bpd_csignals_decoded_invInputs[24], bpd_csignals_decoded_invInputs[25], bpd_csignals_decoded_invInputs[26], bpd_csignals_decoded_invInputs[27], bpd_csignals_decoded_invInputs[28], bpd_csignals_decoded_invInputs[29]}, &_bpd_csignals_decoded_andMatrixOutputs_T_15} == 4'h0 | io_inst[19:15] == io_inst[11:7] | {io_inst[31:25], io_inst[14:12], io_inst[6:0]} == 17'h33 & io_inst[19:15] == 5'h0);
endmodule |
Generate the Verilog code corresponding to this Chisel code //******************************************************************************
// Copyright (c) 2012 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Out-of-Order Load/Store Unit
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Load/Store Unit is made up of the Load Queue, the Store Queue (LDQ and STQ).
//
// Stores are sent to memory at (well, after) commit, loads are executed
// optimstically ASAP. If a misspeculation was discovered, the pipeline is
// cleared. Loads put to sleep are retried. If a LoadAddr and StoreAddr match,
// the Load can receive its data by forwarding data out of the Store Queue.
//
// Currently, loads are sent to memory immediately, and in parallel do an
// associative search of the STQ, on entering the LSU. If a hit on the STQ
// search, the memory request is killed on the next cycle, and if the STQ entry
// is valid, the store data is forwarded to the load (delayed to match the
// load-use delay to delay with the write-port structural hazard). If the store
// data is not present, or it's only a partial match (SB->LH), the load is put
// to sleep in the LDQ.
//
// Memory ordering violations are detected by stores at their addr-gen time by
// associatively searching the LDQ for newer loads that have been issued to
// memory.
//
// The store queue contains both speculated and committed stores.
//
// Only one port to memory... loads and stores have to fight for it, West Side
// Story style.
//
// TODO:
// - Add predicting structure for ordering failures
// - currently won't STD forward if DMEM is busy
// - ability to turn off things if VM is disabled
// - reconsider port count of the wakeup, retry stuff
package boom.v3.lsu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.rocket
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util.Str
import boom.v3.common._
import boom.v3.exu.{BrUpdateInfo, Exception, FuncUnitResp, CommitSignals, ExeUnitResp}
import boom.v3.util.{BoolToChar, AgePriorityEncoder, IsKilledByBranch, GetNewBrMask, WrapInc, IsOlder, UpdateBrMask}
/** Interface between one execution-unit address-calc pipeline and the LSU.
  *
  * The address-calc "response" flows into the LSU as a request; load results
  * flow back out on the integer (`iresp`) and floating-point (`fresp`)
  * writeback channels.
  */
class LSUExeIO(implicit p: Parameters) extends BoomBundle()(p)
{
  // The "resp" of the maddrcalc is really a "req" to the LSU
  val req = Flipped(new ValidIO(new FuncUnitResp(xLen)))
  // Send load data to regfiles
  val iresp = new DecoupledIO(new boom.v3.exu.ExeUnitResp(xLen))   // integer register writeback
  val fresp = new DecoupledIO(new boom.v3.exu.ExeUnitResp(xLen+1)) // TODO: Should this be fLen?
}
/** Request from the LSU to the BOOM data cache.
  * Carries the micro-op (via HasBoomUOP) so responses/nacks can be matched
  * back to LDQ/STQ entries.
  */
class BoomDCacheReq(implicit p: Parameters) extends BoomBundle()(p)
  with HasBoomUOP
{
  val addr = UInt(coreMaxAddrBits.W) // physical (or translated) request address
  val data = Bits(coreDataBits.W)    // store data; unused for loads
  val is_hella = Bool() // Is this the hellacache req? If so this is not tracked in LDQ or STQ
}
/** Response from the BOOM data cache back to the LSU.
  * The attached micro-op identifies which LDQ/STQ entry (or hellacache
  * request) this response belongs to.
  */
class BoomDCacheResp(implicit p: Parameters) extends BoomBundle()(p)
  with HasBoomUOP
{
  val data = Bits(coreDataBits.W) // load data returned by the cache
  val is_hella = Bool()           // response belongs to the hellacache shim, not the LDQ/STQ
}
/** LSU <-> data-cache interface.
  *
  * `memWidth` requests can be issued per cycle; each can later be killed in
  * S1, responded to, or nack'd (forcing re-execution).
  */
class LSUDMemIO(implicit p: Parameters, edge: TLEdgeOut) extends BoomBundle()(p)
{
  // In LSU's dmem stage, send the request
  val req = new DecoupledIO(Vec(memWidth, Valid(new BoomDCacheReq)))
  // In LSU's LCAM search stage, kill if order fail (or forwarding possible)
  val s1_kill = Output(Vec(memWidth, Bool()))
  // Get a request any cycle
  val resp = Flipped(Vec(memWidth, new ValidIO(new BoomDCacheResp)))
  // In our response stage, if we get a nack, we need to reexecute
  val nack = Flipped(Vec(memWidth, new ValidIO(new BoomDCacheReq)))
  val brupdate = Output(new BrUpdateInfo) // branch resolution info, forwarded to the cache
  val exception = Output(Bool())          // pipeline is being flushed
  val rob_pnr_idx = Output(UInt(robAddrSz.W))
  val rob_head_idx = Output(UInt(robAddrSz.W))
  // TileLink C-channel releases from the cache; searched against the LDQ
  val release = Flipped(new DecoupledIO(new TLBundleC(edge.bundle)))
  // Clears prefetching MSHRs
  val force_order = Output(Bool())
  val ordered = Input(Bool()) // cache reports all outstanding ops are ordered
  // performance event pulses from the cache
  val perf = Input(new Bundle {
    val acquire = Bool()
    val release = Bool()
  })
}
/** LSU <-> core (dispatch/issue/ROB) interface.
  *
  * Handles dispatch of loads/stores into the LDQ/STQ, writeback of results,
  * busy/unsafe clearing toward the ROB, and exception reporting.
  */
class LSUCoreIO(implicit p: Parameters) extends BoomBundle()(p)
{
  val exe = Vec(memWidth, new LSUExeIO) // per-pipe address-gen in / load data out
  // dispatch-stage uops and the LDQ/STQ slots assigned to them
  val dis_uops = Flipped(Vec(coreWidth, Valid(new MicroOp)))
  val dis_ldq_idx = Output(Vec(coreWidth, UInt(ldqAddrSz.W)))
  val dis_stq_idx = Output(Vec(coreWidth, UInt(stqAddrSz.W)))
  val ldq_full = Output(Vec(coreWidth, Bool()))
  val stq_full = Output(Vec(coreWidth, Bool()))
  val fp_stdata = Flipped(Decoupled(new ExeUnitResp(fLen))) // FP store data arrives separately
  val commit = Input(new CommitSignals)
  val commit_load_at_rob_head = Input(Bool()) // load at ROB head may execute (e.g. uncacheable)
  // Stores clear busy bit when stdata is received
  // memWidth for int, 1 for fp (to avoid back-pressure fpstdat)
  val clr_bsy = Output(Vec(memWidth + 1, Valid(UInt(robAddrSz.W))))
  // Speculatively safe load (barring memory ordering failure)
  val clr_unsafe = Output(Vec(memWidth, Valid(UInt(robAddrSz.W))))
  // Tell the DCache to clear prefetches/speculating misses
  val fence_dmem = Input(Bool())
  // Speculatively tell the IQs that we'll get load data back next cycle
  val spec_ld_wakeup = Output(Vec(memWidth, Valid(UInt(maxPregSz.W))))
  // Tell the IQs that the load we speculated last cycle was misspeculated
  val ld_miss = Output(Bool())
  val brupdate = Input(new BrUpdateInfo)
  val rob_pnr_idx = Input(UInt(robAddrSz.W))
  val rob_head_idx = Input(UInt(robAddrSz.W))
  val exception = Input(Bool())
  val fencei_rdy = Output(Bool())          // STQ drained and cache ordered; fence.i may proceed
  val lxcpt = Output(Valid(new Exception)) // memory exception (or ordering failure) to report
  val tsc_reg = Input(UInt())              // timestamp counter, for debug printing
  // performance events surfaced to the core
  val perf = Output(new Bundle {
    val acquire = Bool()
    val release = Bool()
    val tlbMiss = Bool()
  })
}
/** Top-level IO of the LSU: PTW port for the DTLB, the core-facing and
  * cache-facing interfaces, and the hellacache shim for RoCC/PTW accesses.
  */
class LSUIO(implicit p: Parameters, edge: TLEdgeOut) extends BoomBundle()(p)
{
  val ptw = new rocket.TLBPTWIO // page-table walker port used by the DTLB
  val core = new LSUCoreIO
  val dmem = new LSUDMemIO
  val hellacache = Flipped(new freechips.rocketchip.rocket.HellaCacheIO)
}
/** One Load Queue (LDQ) entry.
  *
  * Tracks the load's (possibly untranslated) address, its execution status,
  * and its ordering relationship to older stores for store-load forwarding
  * and memory-ordering-failure detection.
  */
class LDQEntry(implicit p: Parameters) extends BoomBundle()(p)
  with HasBoomUOP
{
  val addr = Valid(UInt(coreMaxAddrBits.W)) // valid once address generation has run
  val addr_is_virtual = Bool() // Virtual address, we got a TLB miss
  val addr_is_uncacheable = Bool() // Uncacheable, wait until head of ROB to execute
  val executed = Bool() // load sent to memory, reset by NACKs
  val succeeded = Bool() // data has been returned/forwarded
  val order_fail = Bool() // memory ordering violation detected for this load
  val observed = Bool()   // load's value became architecturally observable (release checking)
  val st_dep_mask = UInt(numStqEntries.W) // list of stores older than us
  val youngest_stq_idx = UInt(stqAddrSz.W) // index of the oldest store younger than us
  val forward_std_val = Bool()
  val forward_stq_idx = UInt(stqAddrSz.W) // Which store did we get the store-load forward from?
  val debug_wb_data = UInt(xLen.W) // writeback value, for debug/trace only
}
/** One Store Queue (STQ) entry.
  *
  * Address and data arrive (possibly separately) from address-gen and
  * data-gen; the store is sent to memory only after it is committed.
  */
class STQEntry(implicit p: Parameters) extends BoomBundle()(p)
  with HasBoomUOP
{
  val addr = Valid(UInt(coreMaxAddrBits.W)) // valid once store address generation has run
  val addr_is_virtual = Bool() // Virtual address, we got a TLB miss
  val data = Valid(UInt(xLen.W)) // valid once store data has arrived
  val committed = Bool() // committed by ROB
  val succeeded = Bool() // D$ has ack'd this, we don't need to maintain this anymore
  val debug_wb_data = UInt(xLen.W) // stored value, for debug/trace only
}
class LSU(implicit p: Parameters, edge: TLEdgeOut) extends BoomModule()(p)
with rocket.HasL1HellaCacheParameters
{
val io = IO(new LSUIO)
io.hellacache := DontCare
val ldq = Reg(Vec(numLdqEntries, Valid(new LDQEntry)))
val stq = Reg(Vec(numStqEntries, Valid(new STQEntry)))
val ldq_head = Reg(UInt(ldqAddrSz.W))
val ldq_tail = Reg(UInt(ldqAddrSz.W))
val stq_head = Reg(UInt(stqAddrSz.W)) // point to next store to clear from STQ (i.e., send to memory)
val stq_tail = Reg(UInt(stqAddrSz.W))
val stq_commit_head = Reg(UInt(stqAddrSz.W)) // point to next store to commit
val stq_execute_head = Reg(UInt(stqAddrSz.W)) // point to next store to execute
// If we got a mispredict, the tail will be misaligned for 1 extra cycle
assert (io.core.brupdate.b2.mispredict ||
stq(stq_execute_head).valid ||
stq_head === stq_execute_head ||
stq_tail === stq_execute_head,
"stq_execute_head got off track.")
val h_ready :: h_s1 :: h_s2 :: h_s2_nack :: h_wait :: h_replay :: h_dead :: Nil = Enum(7)
// s1 : do TLB, if success and not killed, fire request go to h_s2
// store s1_data to register
// if tlb miss, go to s2_nack
// if don't get TLB, go to s2_nack
// store tlb xcpt
// s2 : If kill, go to dead
// If tlb xcpt, send tlb xcpt, go to dead
// s2_nack : send nack, go to dead
// wait : wait for response, if nack, go to replay
// replay : refire request, use already translated address
// dead : wait for response, ignore it
val hella_state = RegInit(h_ready)
val hella_req = Reg(new rocket.HellaCacheReq)
val hella_data = Reg(new rocket.HellaCacheWriteData)
val hella_paddr = Reg(UInt(paddrBits.W))
val hella_xcpt = Reg(new rocket.HellaCacheExceptions)
val dtlb = Module(new NBDTLB(
instruction = false, lgMaxSize = log2Ceil(coreDataBytes), rocket.TLBConfig(dcacheParams.nTLBSets, dcacheParams.nTLBWays)))
io.ptw <> dtlb.io.ptw
io.core.perf.tlbMiss := io.ptw.req.fire
io.core.perf.acquire := io.dmem.perf.acquire
io.core.perf.release := io.dmem.perf.release
val clear_store = WireInit(false.B)
val live_store_mask = RegInit(0.U(numStqEntries.W))
var next_live_store_mask = Mux(clear_store, live_store_mask & ~(1.U << stq_head),
live_store_mask)
def widthMap[T <: Data](f: Int => T) = VecInit((0 until memWidth).map(f))
//-------------------------------------------------------------
//-------------------------------------------------------------
// Enqueue new entries
//-------------------------------------------------------------
//-------------------------------------------------------------
// This is a newer store than existing loads, so clear the bit in all the store dependency masks
for (i <- 0 until numLdqEntries)
{
when (clear_store)
{
ldq(i).bits.st_dep_mask := ldq(i).bits.st_dep_mask & ~(1.U << stq_head)
}
}
// Decode stage
var ld_enq_idx = ldq_tail
var st_enq_idx = stq_tail
val stq_nonempty = (0 until numStqEntries).map{ i => stq(i).valid }.reduce(_||_) =/= 0.U
var ldq_full = Bool()
var stq_full = Bool()
for (w <- 0 until coreWidth)
{
ldq_full = WrapInc(ld_enq_idx, numLdqEntries) === ldq_head
io.core.ldq_full(w) := ldq_full
io.core.dis_ldq_idx(w) := ld_enq_idx
stq_full = WrapInc(st_enq_idx, numStqEntries) === stq_head
io.core.stq_full(w) := stq_full
io.core.dis_stq_idx(w) := st_enq_idx
val dis_ld_val = io.core.dis_uops(w).valid && io.core.dis_uops(w).bits.uses_ldq && !io.core.dis_uops(w).bits.exception
val dis_st_val = io.core.dis_uops(w).valid && io.core.dis_uops(w).bits.uses_stq && !io.core.dis_uops(w).bits.exception
when (dis_ld_val)
{
ldq(ld_enq_idx).valid := true.B
ldq(ld_enq_idx).bits.uop := io.core.dis_uops(w).bits
ldq(ld_enq_idx).bits.youngest_stq_idx := st_enq_idx
ldq(ld_enq_idx).bits.st_dep_mask := next_live_store_mask
ldq(ld_enq_idx).bits.addr.valid := false.B
ldq(ld_enq_idx).bits.executed := false.B
ldq(ld_enq_idx).bits.succeeded := false.B
ldq(ld_enq_idx).bits.order_fail := false.B
ldq(ld_enq_idx).bits.observed := false.B
ldq(ld_enq_idx).bits.forward_std_val := false.B
assert (ld_enq_idx === io.core.dis_uops(w).bits.ldq_idx, "[lsu] mismatch enq load tag.")
assert (!ldq(ld_enq_idx).valid, "[lsu] Enqueuing uop is overwriting ldq entries")
}
.elsewhen (dis_st_val)
{
stq(st_enq_idx).valid := true.B
stq(st_enq_idx).bits.uop := io.core.dis_uops(w).bits
stq(st_enq_idx).bits.addr.valid := false.B
stq(st_enq_idx).bits.data.valid := false.B
stq(st_enq_idx).bits.committed := false.B
stq(st_enq_idx).bits.succeeded := false.B
assert (st_enq_idx === io.core.dis_uops(w).bits.stq_idx, "[lsu] mismatch enq store tag.")
assert (!stq(st_enq_idx).valid, "[lsu] Enqueuing uop is overwriting stq entries")
}
ld_enq_idx = Mux(dis_ld_val, WrapInc(ld_enq_idx, numLdqEntries),
ld_enq_idx)
next_live_store_mask = Mux(dis_st_val, next_live_store_mask | (1.U << st_enq_idx),
next_live_store_mask)
st_enq_idx = Mux(dis_st_val, WrapInc(st_enq_idx, numStqEntries),
st_enq_idx)
assert(!(dis_ld_val && dis_st_val), "A UOP is trying to go into both the LDQ and the STQ")
}
ldq_tail := ld_enq_idx
stq_tail := st_enq_idx
io.dmem.force_order := io.core.fence_dmem
io.core.fencei_rdy := !stq_nonempty && io.dmem.ordered
//-------------------------------------------------------------
//-------------------------------------------------------------
// Execute stage (access TLB, send requests to Memory)
//-------------------------------------------------------------
//-------------------------------------------------------------
// We can only report 1 exception per cycle.
// Just be sure to report the youngest one
val mem_xcpt_valid = Wire(Bool())
val mem_xcpt_cause = Wire(UInt())
val mem_xcpt_uop = Wire(new MicroOp)
val mem_xcpt_vaddr = Wire(UInt())
//---------------------------------------
// Can-fire logic and wakeup/retry select
//
// First we determine what operations are waiting to execute.
// These are the "can_fire"/"will_fire" signals
val will_fire_load_incoming = Wire(Vec(memWidth, Bool()))
val will_fire_stad_incoming = Wire(Vec(memWidth, Bool()))
val will_fire_sta_incoming = Wire(Vec(memWidth, Bool()))
val will_fire_std_incoming = Wire(Vec(memWidth, Bool()))
val will_fire_sfence = Wire(Vec(memWidth, Bool()))
val will_fire_hella_incoming = Wire(Vec(memWidth, Bool()))
val will_fire_hella_wakeup = Wire(Vec(memWidth, Bool()))
val will_fire_release = Wire(Vec(memWidth, Bool()))
val will_fire_load_retry = Wire(Vec(memWidth, Bool()))
val will_fire_sta_retry = Wire(Vec(memWidth, Bool()))
val will_fire_store_commit = Wire(Vec(memWidth, Bool()))
val will_fire_load_wakeup = Wire(Vec(memWidth, Bool()))
val exe_req = WireInit(VecInit(io.core.exe.map(_.req)))
// Sfence goes through all pipes
for (i <- 0 until memWidth) {
when (io.core.exe(i).req.bits.sfence.valid) {
exe_req := VecInit(Seq.fill(memWidth) { io.core.exe(i).req })
}
}
// -------------------------------
// Assorted signals for scheduling
// Don't wakeup a load if we just sent it last cycle or two cycles ago
// The block_load_mask may be wrong, but the executing_load mask must be accurate
val block_load_mask = WireInit(VecInit((0 until numLdqEntries).map(x=>false.B)))
val p1_block_load_mask = RegNext(block_load_mask)
val p2_block_load_mask = RegNext(p1_block_load_mask)
// Prioritize emptying the store queue when it is almost full
val stq_almost_full = RegNext(WrapInc(WrapInc(st_enq_idx, numStqEntries), numStqEntries) === stq_head ||
WrapInc(st_enq_idx, numStqEntries) === stq_head)
// The store at the commit head needs the DCache to appear ordered
// Delay firing load wakeups and retries now
val store_needs_order = WireInit(false.B)
val ldq_incoming_idx = widthMap(i => exe_req(i).bits.uop.ldq_idx)
val ldq_incoming_e = widthMap(i => ldq(ldq_incoming_idx(i)))
val stq_incoming_idx = widthMap(i => exe_req(i).bits.uop.stq_idx)
val stq_incoming_e = widthMap(i => stq(stq_incoming_idx(i)))
val ldq_retry_idx = RegNext(AgePriorityEncoder((0 until numLdqEntries).map(i => {
val e = ldq(i).bits
val block = block_load_mask(i) || p1_block_load_mask(i)
e.addr.valid && e.addr_is_virtual && !block
}), ldq_head))
val ldq_retry_e = ldq(ldq_retry_idx)
val stq_retry_idx = RegNext(AgePriorityEncoder((0 until numStqEntries).map(i => {
val e = stq(i).bits
e.addr.valid && e.addr_is_virtual
}), stq_commit_head))
val stq_retry_e = stq(stq_retry_idx)
val stq_commit_e = stq(stq_execute_head)
val ldq_wakeup_idx = RegNext(AgePriorityEncoder((0 until numLdqEntries).map(i=> {
val e = ldq(i).bits
val block = block_load_mask(i) || p1_block_load_mask(i)
e.addr.valid && !e.executed && !e.succeeded && !e.addr_is_virtual && !block
}), ldq_head))
val ldq_wakeup_e = ldq(ldq_wakeup_idx)
// -----------------------
// Determine what can fire
// Can we fire a incoming load
val can_fire_load_incoming = widthMap(w => exe_req(w).valid && exe_req(w).bits.uop.ctrl.is_load)
// Can we fire an incoming store addrgen + store datagen
val can_fire_stad_incoming = widthMap(w => exe_req(w).valid && exe_req(w).bits.uop.ctrl.is_sta
&& exe_req(w).bits.uop.ctrl.is_std)
// Can we fire an incoming store addrgen
val can_fire_sta_incoming = widthMap(w => exe_req(w).valid && exe_req(w).bits.uop.ctrl.is_sta
&& !exe_req(w).bits.uop.ctrl.is_std)
// Can we fire an incoming store datagen
val can_fire_std_incoming = widthMap(w => exe_req(w).valid && exe_req(w).bits.uop.ctrl.is_std
&& !exe_req(w).bits.uop.ctrl.is_sta)
// Can we fire an incoming sfence
val can_fire_sfence = widthMap(w => exe_req(w).valid && exe_req(w).bits.sfence.valid)
// Can we fire a request from dcache to release a line
// This needs to go through LDQ search to mark loads as dangerous
val can_fire_release = widthMap(w => (w == memWidth-1).B && io.dmem.release.valid)
io.dmem.release.ready := will_fire_release.reduce(_||_)
// Can we retry a load that missed in the TLB
val can_fire_load_retry = widthMap(w =>
( ldq_retry_e.valid &&
ldq_retry_e.bits.addr.valid &&
ldq_retry_e.bits.addr_is_virtual &&
!p1_block_load_mask(ldq_retry_idx) &&
!p2_block_load_mask(ldq_retry_idx) &&
RegNext(dtlb.io.miss_rdy) &&
!store_needs_order &&
(w == memWidth-1).B && // TODO: Is this best scheduling?
!ldq_retry_e.bits.order_fail))
// Can we retry a store addrgen that missed in the TLB
// - Weird edge case when sta_retry and std_incoming for same entry in same cycle. Delay this
val can_fire_sta_retry = widthMap(w =>
( stq_retry_e.valid &&
stq_retry_e.bits.addr.valid &&
stq_retry_e.bits.addr_is_virtual &&
(w == memWidth-1).B &&
RegNext(dtlb.io.miss_rdy) &&
!(widthMap(i => (i != w).B &&
can_fire_std_incoming(i) &&
stq_incoming_idx(i) === stq_retry_idx).reduce(_||_))
))
// Can we commit a store
val can_fire_store_commit = widthMap(w =>
( stq_commit_e.valid &&
!stq_commit_e.bits.uop.is_fence &&
!mem_xcpt_valid &&
!stq_commit_e.bits.uop.exception &&
(w == 0).B &&
(stq_commit_e.bits.committed || ( stq_commit_e.bits.uop.is_amo &&
stq_commit_e.bits.addr.valid &&
!stq_commit_e.bits.addr_is_virtual &&
stq_commit_e.bits.data.valid))))
// Can we wakeup a load that was nack'd
val block_load_wakeup = WireInit(false.B)
val can_fire_load_wakeup = widthMap(w =>
( ldq_wakeup_e.valid &&
ldq_wakeup_e.bits.addr.valid &&
!ldq_wakeup_e.bits.succeeded &&
!ldq_wakeup_e.bits.addr_is_virtual &&
!ldq_wakeup_e.bits.executed &&
!ldq_wakeup_e.bits.order_fail &&
!p1_block_load_mask(ldq_wakeup_idx) &&
!p2_block_load_mask(ldq_wakeup_idx) &&
!store_needs_order &&
!block_load_wakeup &&
(w == memWidth-1).B &&
(!ldq_wakeup_e.bits.addr_is_uncacheable || (io.core.commit_load_at_rob_head &&
ldq_head === ldq_wakeup_idx &&
ldq_wakeup_e.bits.st_dep_mask.asUInt === 0.U))))
// Can we fire an incoming hellacache request
val can_fire_hella_incoming = WireInit(widthMap(w => false.B)) // This is assigned to in the hellashim ocntroller
// Can we fire a hellacache request that the dcache nack'd
val can_fire_hella_wakeup = WireInit(widthMap(w => false.B)) // This is assigned to in the hellashim controller
//---------------------------------------------------------
// Controller logic. Arbitrate which request actually fires
val exe_tlb_valid = Wire(Vec(memWidth, Bool()))
for (w <- 0 until memWidth) {
var tlb_avail = true.B
var dc_avail = true.B
var lcam_avail = true.B
var rob_avail = true.B
def lsu_sched(can_fire: Bool, uses_tlb:Boolean, uses_dc:Boolean, uses_lcam: Boolean, uses_rob:Boolean): Bool = {
val will_fire = can_fire && !(uses_tlb.B && !tlb_avail) &&
!(uses_lcam.B && !lcam_avail) &&
!(uses_dc.B && !dc_avail) &&
!(uses_rob.B && !rob_avail)
tlb_avail = tlb_avail && !(will_fire && uses_tlb.B)
lcam_avail = lcam_avail && !(will_fire && uses_lcam.B)
dc_avail = dc_avail && !(will_fire && uses_dc.B)
rob_avail = rob_avail && !(will_fire && uses_rob.B)
dontTouch(will_fire) // dontTouch these so we can inspect the will_fire signals
will_fire
}
// The order of these statements is the priority
// Some restrictions
// - Incoming ops must get precedence, can't backpresure memaddrgen
// - Incoming hellacache ops must get precedence over retrying ops (PTW must get precedence over retrying translation)
// Notes on performance
// - Prioritize releases, this speeds up cache line writebacks and refills
// - Store commits are lowest priority, since they don't "block" younger instructions unless stq fills up
will_fire_load_incoming (w) := lsu_sched(can_fire_load_incoming (w) , true , true , true , false) // TLB , DC , LCAM
will_fire_stad_incoming (w) := lsu_sched(can_fire_stad_incoming (w) , true , false, true , true) // TLB , , LCAM , ROB
will_fire_sta_incoming (w) := lsu_sched(can_fire_sta_incoming (w) , true , false, true , true) // TLB , , LCAM , ROB
will_fire_std_incoming (w) := lsu_sched(can_fire_std_incoming (w) , false, false, false, true) // , ROB
will_fire_sfence (w) := lsu_sched(can_fire_sfence (w) , true , false, false, true) // TLB , , , ROB
will_fire_release (w) := lsu_sched(can_fire_release (w) , false, false, true , false) // LCAM
will_fire_hella_incoming(w) := lsu_sched(can_fire_hella_incoming(w) , true , true , false, false) // TLB , DC
will_fire_hella_wakeup (w) := lsu_sched(can_fire_hella_wakeup (w) , false, true , false, false) // , DC
will_fire_load_retry (w) := lsu_sched(can_fire_load_retry (w) , true , true , true , false) // TLB , DC , LCAM
will_fire_sta_retry (w) := lsu_sched(can_fire_sta_retry (w) , true , false, true , true) // TLB , , LCAM , ROB // TODO: This should be higher priority
will_fire_load_wakeup (w) := lsu_sched(can_fire_load_wakeup (w) , false, true , true , false) // , DC , LCAM1
will_fire_store_commit (w) := lsu_sched(can_fire_store_commit (w) , false, true , false, false) // , DC
assert(!(exe_req(w).valid && !(will_fire_load_incoming(w) || will_fire_stad_incoming(w) || will_fire_sta_incoming(w) || will_fire_std_incoming(w) || will_fire_sfence(w))))
when (will_fire_load_wakeup(w)) {
block_load_mask(ldq_wakeup_idx) := true.B
} .elsewhen (will_fire_load_incoming(w)) {
block_load_mask(exe_req(w).bits.uop.ldq_idx) := true.B
} .elsewhen (will_fire_load_retry(w)) {
block_load_mask(ldq_retry_idx) := true.B
}
exe_tlb_valid(w) := !tlb_avail
}
assert((memWidth == 1).B ||
(!(will_fire_sfence.reduce(_||_) && !will_fire_sfence.reduce(_&&_)) &&
!will_fire_hella_incoming.reduce(_&&_) &&
!will_fire_hella_wakeup.reduce(_&&_) &&
!will_fire_load_retry.reduce(_&&_) &&
!will_fire_sta_retry.reduce(_&&_) &&
!will_fire_store_commit.reduce(_&&_) &&
!will_fire_load_wakeup.reduce(_&&_)),
"Some operations is proceeding down multiple pipes")
require(memWidth <= 2)
//--------------------------------------------
// TLB Access
assert(!(hella_state =/= h_ready && hella_req.cmd === rocket.M_SFENCE),
"SFENCE through hella interface not supported")
val exe_tlb_uop = widthMap(w =>
Mux(will_fire_load_incoming (w) ||
will_fire_stad_incoming (w) ||
will_fire_sta_incoming (w) ||
will_fire_sfence (w) , exe_req(w).bits.uop,
Mux(will_fire_load_retry (w) , ldq_retry_e.bits.uop,
Mux(will_fire_sta_retry (w) , stq_retry_e.bits.uop,
Mux(will_fire_hella_incoming(w) , NullMicroOp,
NullMicroOp)))))
val exe_tlb_vaddr = widthMap(w =>
Mux(will_fire_load_incoming (w) ||
will_fire_stad_incoming (w) ||
will_fire_sta_incoming (w) , exe_req(w).bits.addr,
Mux(will_fire_sfence (w) , exe_req(w).bits.sfence.bits.addr,
Mux(will_fire_load_retry (w) , ldq_retry_e.bits.addr.bits,
Mux(will_fire_sta_retry (w) , stq_retry_e.bits.addr.bits,
Mux(will_fire_hella_incoming(w) , hella_req.addr,
0.U))))))
val exe_sfence = WireInit((0.U).asTypeOf(Valid(new rocket.SFenceReq)))
for (w <- 0 until memWidth) {
when (will_fire_sfence(w)) {
exe_sfence := exe_req(w).bits.sfence
}
}
val exe_size = widthMap(w =>
Mux(will_fire_load_incoming (w) ||
will_fire_stad_incoming (w) ||
will_fire_sta_incoming (w) ||
will_fire_sfence (w) ||
will_fire_load_retry (w) ||
will_fire_sta_retry (w) , exe_tlb_uop(w).mem_size,
Mux(will_fire_hella_incoming(w) , hella_req.size,
0.U)))
val exe_cmd = widthMap(w =>
Mux(will_fire_load_incoming (w) ||
will_fire_stad_incoming (w) ||
will_fire_sta_incoming (w) ||
will_fire_sfence (w) ||
will_fire_load_retry (w) ||
will_fire_sta_retry (w) , exe_tlb_uop(w).mem_cmd,
Mux(will_fire_hella_incoming(w) , hella_req.cmd,
0.U)))
val exe_passthr= widthMap(w =>
Mux(will_fire_hella_incoming(w) , hella_req.phys,
false.B))
val exe_kill = widthMap(w =>
Mux(will_fire_hella_incoming(w) , io.hellacache.s1_kill,
false.B))
for (w <- 0 until memWidth) {
dtlb.io.req(w).valid := exe_tlb_valid(w)
dtlb.io.req(w).bits.vaddr := exe_tlb_vaddr(w)
dtlb.io.req(w).bits.size := exe_size(w)
dtlb.io.req(w).bits.cmd := exe_cmd(w)
dtlb.io.req(w).bits.passthrough := exe_passthr(w)
dtlb.io.req(w).bits.v := io.ptw.status.v
dtlb.io.req(w).bits.prv := io.ptw.status.prv
}
dtlb.io.kill := exe_kill.reduce(_||_)
dtlb.io.sfence := exe_sfence
// exceptions
val ma_ld = widthMap(w => will_fire_load_incoming(w) && exe_req(w).bits.mxcpt.valid) // We get ma_ld in memaddrcalc
val ma_st = widthMap(w => (will_fire_sta_incoming(w) || will_fire_stad_incoming(w)) && exe_req(w).bits.mxcpt.valid) // We get ma_ld in memaddrcalc
val pf_ld = widthMap(w => dtlb.io.req(w).valid && dtlb.io.resp(w).pf.ld && exe_tlb_uop(w).uses_ldq)
val pf_st = widthMap(w => dtlb.io.req(w).valid && dtlb.io.resp(w).pf.st && exe_tlb_uop(w).uses_stq)
val ae_ld = widthMap(w => dtlb.io.req(w).valid && dtlb.io.resp(w).ae.ld && exe_tlb_uop(w).uses_ldq)
val ae_st = widthMap(w => dtlb.io.req(w).valid && dtlb.io.resp(w).ae.st && exe_tlb_uop(w).uses_stq)
// TODO check for xcpt_if and verify that never happens on non-speculative instructions.
val mem_xcpt_valids = RegNext(widthMap(w =>
(pf_ld(w) || pf_st(w) || ae_ld(w) || ae_st(w) || ma_ld(w) || ma_st(w)) &&
!io.core.exception &&
!IsKilledByBranch(io.core.brupdate, exe_tlb_uop(w))))
val mem_xcpt_uops = RegNext(widthMap(w => UpdateBrMask(io.core.brupdate, exe_tlb_uop(w))))
val mem_xcpt_causes = RegNext(widthMap(w =>
Mux(ma_ld(w), rocket.Causes.misaligned_load.U,
Mux(ma_st(w), rocket.Causes.misaligned_store.U,
Mux(pf_ld(w), rocket.Causes.load_page_fault.U,
Mux(pf_st(w), rocket.Causes.store_page_fault.U,
Mux(ae_ld(w), rocket.Causes.load_access.U,
rocket.Causes.store_access.U)))))))
val mem_xcpt_vaddrs = RegNext(exe_tlb_vaddr)
for (w <- 0 until memWidth) {
assert (!(dtlb.io.req(w).valid && exe_tlb_uop(w).is_fence), "Fence is pretending to talk to the TLB")
assert (!((will_fire_load_incoming(w) || will_fire_sta_incoming(w) || will_fire_stad_incoming(w)) &&
exe_req(w).bits.mxcpt.valid && dtlb.io.req(w).valid &&
!(exe_tlb_uop(w).ctrl.is_load || exe_tlb_uop(w).ctrl.is_sta)),
"A uop that's not a load or store-address is throwing a memory exception.")
}
mem_xcpt_valid := mem_xcpt_valids.reduce(_||_)
mem_xcpt_cause := mem_xcpt_causes(0)
mem_xcpt_uop := mem_xcpt_uops(0)
mem_xcpt_vaddr := mem_xcpt_vaddrs(0)
var xcpt_found = mem_xcpt_valids(0)
var oldest_xcpt_rob_idx = mem_xcpt_uops(0).rob_idx
for (w <- 1 until memWidth) {
val is_older = WireInit(false.B)
when (mem_xcpt_valids(w) &&
(IsOlder(mem_xcpt_uops(w).rob_idx, oldest_xcpt_rob_idx, io.core.rob_head_idx) || !xcpt_found)) {
is_older := true.B
mem_xcpt_cause := mem_xcpt_causes(w)
mem_xcpt_uop := mem_xcpt_uops(w)
mem_xcpt_vaddr := mem_xcpt_vaddrs(w)
}
xcpt_found = xcpt_found || mem_xcpt_valids(w)
oldest_xcpt_rob_idx = Mux(is_older, mem_xcpt_uops(w).rob_idx, oldest_xcpt_rob_idx)
}
val exe_tlb_miss = widthMap(w => dtlb.io.req(w).valid && (dtlb.io.resp(w).miss || !dtlb.io.req(w).ready))
val exe_tlb_paddr = widthMap(w => Cat(dtlb.io.resp(w).paddr(paddrBits-1,corePgIdxBits),
exe_tlb_vaddr(w)(corePgIdxBits-1,0)))
val exe_tlb_uncacheable = widthMap(w => !(dtlb.io.resp(w).cacheable))
for (w <- 0 until memWidth) {
assert (exe_tlb_paddr(w) === dtlb.io.resp(w).paddr || exe_req(w).bits.sfence.valid, "[lsu] paddrs should match.")
when (mem_xcpt_valids(w))
{
assert(RegNext(will_fire_load_incoming(w) || will_fire_stad_incoming(w) || will_fire_sta_incoming(w) ||
will_fire_load_retry(w) || will_fire_sta_retry(w)))
// Technically only faulting AMOs need this
assert(mem_xcpt_uops(w).uses_ldq ^ mem_xcpt_uops(w).uses_stq)
when (mem_xcpt_uops(w).uses_ldq)
{
ldq(mem_xcpt_uops(w).ldq_idx).bits.uop.exception := true.B
}
.otherwise
{
stq(mem_xcpt_uops(w).stq_idx).bits.uop.exception := true.B
}
}
}
//------------------------------
// Issue Someting to Memory
//
// A memory op can come from many different places
// The address either was freshly translated, or we are
// reading a physical address from the LDQ,STQ, or the HellaCache adapter
// defaults
io.dmem.brupdate := io.core.brupdate
io.dmem.exception := io.core.exception
io.dmem.rob_head_idx := io.core.rob_head_idx
io.dmem.rob_pnr_idx := io.core.rob_pnr_idx
val dmem_req = Wire(Vec(memWidth, Valid(new BoomDCacheReq)))
io.dmem.req.valid := dmem_req.map(_.valid).reduce(_||_)
io.dmem.req.bits := dmem_req
val dmem_req_fire = widthMap(w => dmem_req(w).valid && io.dmem.req.fire)
val s0_executing_loads = WireInit(VecInit((0 until numLdqEntries).map(x=>false.B)))
for (w <- 0 until memWidth) {
dmem_req(w).valid := false.B
dmem_req(w).bits.uop := NullMicroOp
dmem_req(w).bits.addr := 0.U
dmem_req(w).bits.data := 0.U
dmem_req(w).bits.is_hella := false.B
io.dmem.s1_kill(w) := false.B
when (will_fire_load_incoming(w)) {
dmem_req(w).valid := !exe_tlb_miss(w) && !exe_tlb_uncacheable(w)
dmem_req(w).bits.addr := exe_tlb_paddr(w)
dmem_req(w).bits.uop := exe_tlb_uop(w)
s0_executing_loads(ldq_incoming_idx(w)) := dmem_req_fire(w)
assert(!ldq_incoming_e(w).bits.executed)
} .elsewhen (will_fire_load_retry(w)) {
dmem_req(w).valid := !exe_tlb_miss(w) && !exe_tlb_uncacheable(w)
dmem_req(w).bits.addr := exe_tlb_paddr(w)
dmem_req(w).bits.uop := exe_tlb_uop(w)
s0_executing_loads(ldq_retry_idx) := dmem_req_fire(w)
assert(!ldq_retry_e.bits.executed)
} .elsewhen (will_fire_store_commit(w)) {
dmem_req(w).valid := true.B
dmem_req(w).bits.addr := stq_commit_e.bits.addr.bits
dmem_req(w).bits.data := (new freechips.rocketchip.rocket.StoreGen(
stq_commit_e.bits.uop.mem_size, 0.U,
stq_commit_e.bits.data.bits,
coreDataBytes)).data
dmem_req(w).bits.uop := stq_commit_e.bits.uop
stq_execute_head := Mux(dmem_req_fire(w),
WrapInc(stq_execute_head, numStqEntries),
stq_execute_head)
stq(stq_execute_head).bits.succeeded := false.B
} .elsewhen (will_fire_load_wakeup(w)) {
dmem_req(w).valid := true.B
dmem_req(w).bits.addr := ldq_wakeup_e.bits.addr.bits
dmem_req(w).bits.uop := ldq_wakeup_e.bits.uop
s0_executing_loads(ldq_wakeup_idx) := dmem_req_fire(w)
assert(!ldq_wakeup_e.bits.executed && !ldq_wakeup_e.bits.addr_is_virtual)
} .elsewhen (will_fire_hella_incoming(w)) {
assert(hella_state === h_s1)
dmem_req(w).valid := !io.hellacache.s1_kill && (!exe_tlb_miss(w) || hella_req.phys)
dmem_req(w).bits.addr := exe_tlb_paddr(w)
dmem_req(w).bits.data := (new freechips.rocketchip.rocket.StoreGen(
hella_req.size, 0.U,
io.hellacache.s1_data.data,
coreDataBytes)).data
dmem_req(w).bits.uop.mem_cmd := hella_req.cmd
dmem_req(w).bits.uop.mem_size := hella_req.size
dmem_req(w).bits.uop.mem_signed := hella_req.signed
dmem_req(w).bits.is_hella := true.B
hella_paddr := exe_tlb_paddr(w)
}
.elsewhen (will_fire_hella_wakeup(w))
{
assert(hella_state === h_replay)
dmem_req(w).valid := true.B
dmem_req(w).bits.addr := hella_paddr
dmem_req(w).bits.data := (new freechips.rocketchip.rocket.StoreGen(
hella_req.size, 0.U,
hella_data.data,
coreDataBytes)).data
dmem_req(w).bits.uop.mem_cmd := hella_req.cmd
dmem_req(w).bits.uop.mem_size := hella_req.size
dmem_req(w).bits.uop.mem_signed := hella_req.signed
dmem_req(w).bits.is_hella := true.B
}
//-------------------------------------------------------------
// Write Addr into the LAQ/SAQ
when (will_fire_load_incoming(w) || will_fire_load_retry(w))
{
val ldq_idx = Mux(will_fire_load_incoming(w), ldq_incoming_idx(w), ldq_retry_idx)
ldq(ldq_idx).bits.addr.valid := true.B
ldq(ldq_idx).bits.addr.bits := Mux(exe_tlb_miss(w), exe_tlb_vaddr(w), exe_tlb_paddr(w))
ldq(ldq_idx).bits.uop.pdst := exe_tlb_uop(w).pdst
ldq(ldq_idx).bits.addr_is_virtual := exe_tlb_miss(w)
ldq(ldq_idx).bits.addr_is_uncacheable := exe_tlb_uncacheable(w) && !exe_tlb_miss(w)
assert(!(will_fire_load_incoming(w) && ldq_incoming_e(w).bits.addr.valid),
"[lsu] Incoming load is overwriting a valid address")
}
when (will_fire_sta_incoming(w) || will_fire_stad_incoming(w) || will_fire_sta_retry(w))
{
val stq_idx = Mux(will_fire_sta_incoming(w) || will_fire_stad_incoming(w),
stq_incoming_idx(w), stq_retry_idx)
stq(stq_idx).bits.addr.valid := !pf_st(w) // Prevent AMOs from executing!
stq(stq_idx).bits.addr.bits := Mux(exe_tlb_miss(w), exe_tlb_vaddr(w), exe_tlb_paddr(w))
stq(stq_idx).bits.uop.pdst := exe_tlb_uop(w).pdst // Needed for AMOs
stq(stq_idx).bits.addr_is_virtual := exe_tlb_miss(w)
assert(!(will_fire_sta_incoming(w) && stq_incoming_e(w).bits.addr.valid),
"[lsu] Incoming store is overwriting a valid address")
}
//-------------------------------------------------------------
// Write data into the STQ
if (w == 0)
io.core.fp_stdata.ready := !will_fire_std_incoming(w) && !will_fire_stad_incoming(w)
val fp_stdata_fire = io.core.fp_stdata.fire && (w == 0).B
when (will_fire_std_incoming(w) || will_fire_stad_incoming(w) || fp_stdata_fire)
{
val sidx = Mux(will_fire_std_incoming(w) || will_fire_stad_incoming(w),
stq_incoming_idx(w),
io.core.fp_stdata.bits.uop.stq_idx)
stq(sidx).bits.data.valid := true.B
stq(sidx).bits.data.bits := Mux(will_fire_std_incoming(w) || will_fire_stad_incoming(w),
exe_req(w).bits.data,
io.core.fp_stdata.bits.data)
assert(!(stq(sidx).bits.data.valid),
"[lsu] Incoming store is overwriting a valid data entry")
}
}
val will_fire_stdf_incoming = io.core.fp_stdata.fire
require (xLen >= fLen) // for correct SDQ size
//-------------------------------------------------------------
//-------------------------------------------------------------
// Cache Access Cycle (Mem)
//-------------------------------------------------------------
//-------------------------------------------------------------
// Note the DCache may not have accepted our request
val exe_req_killed = widthMap(w => IsKilledByBranch(io.core.brupdate, exe_req(w).bits.uop))
val stdf_killed = IsKilledByBranch(io.core.brupdate, io.core.fp_stdata.bits.uop)
val fired_load_incoming = widthMap(w => RegNext(will_fire_load_incoming(w) && !exe_req_killed(w)))
val fired_stad_incoming = widthMap(w => RegNext(will_fire_stad_incoming(w) && !exe_req_killed(w)))
val fired_sta_incoming = widthMap(w => RegNext(will_fire_sta_incoming (w) && !exe_req_killed(w)))
val fired_std_incoming = widthMap(w => RegNext(will_fire_std_incoming (w) && !exe_req_killed(w)))
val fired_stdf_incoming = RegNext(will_fire_stdf_incoming && !stdf_killed)
val fired_sfence = RegNext(will_fire_sfence)
val fired_release = RegNext(will_fire_release)
val fired_load_retry = widthMap(w => RegNext(will_fire_load_retry (w) && !IsKilledByBranch(io.core.brupdate, ldq_retry_e.bits.uop)))
val fired_sta_retry = widthMap(w => RegNext(will_fire_sta_retry (w) && !IsKilledByBranch(io.core.brupdate, stq_retry_e.bits.uop)))
val fired_store_commit = RegNext(will_fire_store_commit)
val fired_load_wakeup = widthMap(w => RegNext(will_fire_load_wakeup (w) && !IsKilledByBranch(io.core.brupdate, ldq_wakeup_e.bits.uop)))
val fired_hella_incoming = RegNext(will_fire_hella_incoming)
val fired_hella_wakeup = RegNext(will_fire_hella_wakeup)
val mem_incoming_uop = RegNext(widthMap(w => UpdateBrMask(io.core.brupdate, exe_req(w).bits.uop)))
val mem_ldq_incoming_e = RegNext(widthMap(w => UpdateBrMask(io.core.brupdate, ldq_incoming_e(w))))
val mem_stq_incoming_e = RegNext(widthMap(w => UpdateBrMask(io.core.brupdate, stq_incoming_e(w))))
val mem_ldq_wakeup_e = RegNext(UpdateBrMask(io.core.brupdate, ldq_wakeup_e))
val mem_ldq_retry_e = RegNext(UpdateBrMask(io.core.brupdate, ldq_retry_e))
val mem_stq_retry_e = RegNext(UpdateBrMask(io.core.brupdate, stq_retry_e))
val mem_ldq_e = widthMap(w =>
Mux(fired_load_incoming(w), mem_ldq_incoming_e(w),
Mux(fired_load_retry (w), mem_ldq_retry_e,
Mux(fired_load_wakeup (w), mem_ldq_wakeup_e, (0.U).asTypeOf(Valid(new LDQEntry))))))
val mem_stq_e = widthMap(w =>
Mux(fired_stad_incoming(w) ||
fired_sta_incoming (w), mem_stq_incoming_e(w),
Mux(fired_sta_retry (w), mem_stq_retry_e, (0.U).asTypeOf(Valid(new STQEntry)))))
val mem_stdf_uop = RegNext(UpdateBrMask(io.core.brupdate, io.core.fp_stdata.bits.uop))
val mem_tlb_miss = RegNext(exe_tlb_miss)
val mem_tlb_uncacheable = RegNext(exe_tlb_uncacheable)
val mem_paddr = RegNext(widthMap(w => dmem_req(w).bits.addr))
// Task 1: Clr ROB busy bit
val clr_bsy_valid = RegInit(widthMap(w => false.B))
val clr_bsy_rob_idx = Reg(Vec(memWidth, UInt(robAddrSz.W)))
val clr_bsy_brmask = Reg(Vec(memWidth, UInt(maxBrCount.W)))
for (w <- 0 until memWidth) {
clr_bsy_valid (w) := false.B
clr_bsy_rob_idx (w) := 0.U
clr_bsy_brmask (w) := 0.U
when (fired_stad_incoming(w)) {
clr_bsy_valid (w) := mem_stq_incoming_e(w).valid &&
!mem_tlb_miss(w) &&
!mem_stq_incoming_e(w).bits.uop.is_amo &&
!IsKilledByBranch(io.core.brupdate, mem_stq_incoming_e(w).bits.uop)
clr_bsy_rob_idx (w) := mem_stq_incoming_e(w).bits.uop.rob_idx
clr_bsy_brmask (w) := GetNewBrMask(io.core.brupdate, mem_stq_incoming_e(w).bits.uop)
} .elsewhen (fired_sta_incoming(w)) {
clr_bsy_valid (w) := mem_stq_incoming_e(w).valid &&
mem_stq_incoming_e(w).bits.data.valid &&
!mem_tlb_miss(w) &&
!mem_stq_incoming_e(w).bits.uop.is_amo &&
!IsKilledByBranch(io.core.brupdate, mem_stq_incoming_e(w).bits.uop)
clr_bsy_rob_idx (w) := mem_stq_incoming_e(w).bits.uop.rob_idx
clr_bsy_brmask (w) := GetNewBrMask(io.core.brupdate, mem_stq_incoming_e(w).bits.uop)
} .elsewhen (fired_std_incoming(w)) {
clr_bsy_valid (w) := mem_stq_incoming_e(w).valid &&
mem_stq_incoming_e(w).bits.addr.valid &&
!mem_stq_incoming_e(w).bits.addr_is_virtual &&
!mem_stq_incoming_e(w).bits.uop.is_amo &&
!IsKilledByBranch(io.core.brupdate, mem_stq_incoming_e(w).bits.uop)
clr_bsy_rob_idx (w) := mem_stq_incoming_e(w).bits.uop.rob_idx
clr_bsy_brmask (w) := GetNewBrMask(io.core.brupdate, mem_stq_incoming_e(w).bits.uop)
} .elsewhen (fired_sfence(w)) {
clr_bsy_valid (w) := (w == 0).B // SFence proceeds down all paths, only allow one to clr the rob
clr_bsy_rob_idx (w) := mem_incoming_uop(w).rob_idx
clr_bsy_brmask (w) := GetNewBrMask(io.core.brupdate, mem_incoming_uop(w))
} .elsewhen (fired_sta_retry(w)) {
clr_bsy_valid (w) := mem_stq_retry_e.valid &&
mem_stq_retry_e.bits.data.valid &&
!mem_tlb_miss(w) &&
!mem_stq_retry_e.bits.uop.is_amo &&
!IsKilledByBranch(io.core.brupdate, mem_stq_retry_e.bits.uop)
clr_bsy_rob_idx (w) := mem_stq_retry_e.bits.uop.rob_idx
clr_bsy_brmask (w) := GetNewBrMask(io.core.brupdate, mem_stq_retry_e.bits.uop)
}
io.core.clr_bsy(w).valid := clr_bsy_valid(w) &&
!IsKilledByBranch(io.core.brupdate, clr_bsy_brmask(w)) &&
!io.core.exception && !RegNext(io.core.exception) && !RegNext(RegNext(io.core.exception))
io.core.clr_bsy(w).bits := clr_bsy_rob_idx(w)
}
val stdf_clr_bsy_valid = RegInit(false.B)
val stdf_clr_bsy_rob_idx = Reg(UInt(robAddrSz.W))
val stdf_clr_bsy_brmask = Reg(UInt(maxBrCount.W))
stdf_clr_bsy_valid := false.B
stdf_clr_bsy_rob_idx := 0.U
stdf_clr_bsy_brmask := 0.U
when (fired_stdf_incoming) {
val s_idx = mem_stdf_uop.stq_idx
stdf_clr_bsy_valid := stq(s_idx).valid &&
stq(s_idx).bits.addr.valid &&
!stq(s_idx).bits.addr_is_virtual &&
!stq(s_idx).bits.uop.is_amo &&
!IsKilledByBranch(io.core.brupdate, mem_stdf_uop)
stdf_clr_bsy_rob_idx := mem_stdf_uop.rob_idx
stdf_clr_bsy_brmask := GetNewBrMask(io.core.brupdate, mem_stdf_uop)
}
io.core.clr_bsy(memWidth).valid := stdf_clr_bsy_valid &&
!IsKilledByBranch(io.core.brupdate, stdf_clr_bsy_brmask) &&
!io.core.exception && !RegNext(io.core.exception) && !RegNext(RegNext(io.core.exception))
io.core.clr_bsy(memWidth).bits := stdf_clr_bsy_rob_idx
// Task 2: Do LD-LD. ST-LD searches for ordering failures
// Do LD-ST search for forwarding opportunities
// We have the opportunity to kill a request we sent last cycle. Use it wisely!
// We translated a store last cycle
val do_st_search = widthMap(w => (fired_stad_incoming(w) || fired_sta_incoming(w) || fired_sta_retry(w)) && !mem_tlb_miss(w))
// We translated a load last cycle
val do_ld_search = widthMap(w => ((fired_load_incoming(w) || fired_load_retry(w)) && !mem_tlb_miss(w)) ||
fired_load_wakeup(w))
// We are making a local line visible to other harts
val do_release_search = widthMap(w => fired_release(w))
// Store addrs don't go to memory yet, get it from the TLB response
// Load wakeups don't go through TLB, get it through memory
// Load incoming and load retries go through both
val lcam_addr = widthMap(w => Mux(fired_stad_incoming(w) || fired_sta_incoming(w) || fired_sta_retry(w),
RegNext(exe_tlb_paddr(w)),
Mux(fired_release(w), RegNext(io.dmem.release.bits.address),
mem_paddr(w))))
val lcam_uop = widthMap(w => Mux(do_st_search(w), mem_stq_e(w).bits.uop,
Mux(do_ld_search(w), mem_ldq_e(w).bits.uop, NullMicroOp)))
val lcam_mask = widthMap(w => GenByteMask(lcam_addr(w), lcam_uop(w).mem_size))
val lcam_st_dep_mask = widthMap(w => mem_ldq_e(w).bits.st_dep_mask)
val lcam_is_release = widthMap(w => fired_release(w))
val lcam_ldq_idx = widthMap(w =>
Mux(fired_load_incoming(w), mem_incoming_uop(w).ldq_idx,
Mux(fired_load_wakeup (w), RegNext(ldq_wakeup_idx),
Mux(fired_load_retry (w), RegNext(ldq_retry_idx), 0.U))))
val lcam_stq_idx = widthMap(w =>
Mux(fired_stad_incoming(w) ||
fired_sta_incoming (w), mem_incoming_uop(w).stq_idx,
Mux(fired_sta_retry (w), RegNext(stq_retry_idx), 0.U)))
val can_forward = WireInit(widthMap(w =>
Mux(fired_load_incoming(w) || fired_load_retry(w), !mem_tlb_uncacheable(w),
!ldq(lcam_ldq_idx(w)).bits.addr_is_uncacheable)))
// Mask of stores which we conflict on address with
val ldst_addr_matches = WireInit(widthMap(w => VecInit((0 until numStqEntries).map(x=>false.B))))
// Mask of stores which we can forward from
val ldst_forward_matches = WireInit(widthMap(w => VecInit((0 until numStqEntries).map(x=>false.B))))
val failed_loads = WireInit(VecInit((0 until numLdqEntries).map(x=>false.B))) // Loads which we will report as failures (throws a mini-exception)
val nacking_loads = WireInit(VecInit((0 until numLdqEntries).map(x=>false.B))) // Loads which are being nacked by dcache in the next stage
val s1_executing_loads = RegNext(s0_executing_loads)
val s1_set_execute = WireInit(s1_executing_loads)
val mem_forward_valid = Wire(Vec(memWidth, Bool()))
val mem_forward_ldq_idx = lcam_ldq_idx
val mem_forward_ld_addr = lcam_addr
val mem_forward_stq_idx = Wire(Vec(memWidth, UInt(log2Ceil(numStqEntries).W)))
val wb_forward_valid = RegNext(mem_forward_valid)
val wb_forward_ldq_idx = RegNext(mem_forward_ldq_idx)
val wb_forward_ld_addr = RegNext(mem_forward_ld_addr)
val wb_forward_stq_idx = RegNext(mem_forward_stq_idx)
for (i <- 0 until numLdqEntries) {
val l_valid = ldq(i).valid
val l_bits = ldq(i).bits
val l_addr = ldq(i).bits.addr.bits
val l_mask = GenByteMask(l_addr, l_bits.uop.mem_size)
val l_forwarders = widthMap(w => wb_forward_valid(w) && wb_forward_ldq_idx(w) === i.U)
val l_is_forwarding = l_forwarders.reduce(_||_)
val l_forward_stq_idx = Mux(l_is_forwarding, Mux1H(l_forwarders, wb_forward_stq_idx), l_bits.forward_stq_idx)
val block_addr_matches = widthMap(w => lcam_addr(w) >> blockOffBits === l_addr >> blockOffBits)
val dword_addr_matches = widthMap(w => block_addr_matches(w) && lcam_addr(w)(blockOffBits-1,3) === l_addr(blockOffBits-1,3))
val mask_match = widthMap(w => (l_mask & lcam_mask(w)) === l_mask)
val mask_overlap = widthMap(w => (l_mask & lcam_mask(w)).orR)
// Searcher is a store
for (w <- 0 until memWidth) {
when (do_release_search(w) &&
l_valid &&
l_bits.addr.valid &&
block_addr_matches(w)) {
// This load has been observed, so if a younger load to the same address has not
// executed yet, this load must be squashed
ldq(i).bits.observed := true.B
} .elsewhen (do_st_search(w) &&
l_valid &&
l_bits.addr.valid &&
(l_bits.executed || l_bits.succeeded || l_is_forwarding) &&
!l_bits.addr_is_virtual &&
l_bits.st_dep_mask(lcam_stq_idx(w)) &&
dword_addr_matches(w) &&
mask_overlap(w)) {
val forwarded_is_older = IsOlder(l_forward_stq_idx, lcam_stq_idx(w), l_bits.youngest_stq_idx)
// We are older than this load, which overlapped us.
when (!l_bits.forward_std_val || // If the load wasn't forwarded, it definitely failed
((l_forward_stq_idx =/= lcam_stq_idx(w)) && forwarded_is_older)) { // If the load forwarded from us, we might be ok
ldq(i).bits.order_fail := true.B
failed_loads(i) := true.B
}
} .elsewhen (do_ld_search(w) &&
l_valid &&
l_bits.addr.valid &&
!l_bits.addr_is_virtual &&
dword_addr_matches(w) &&
mask_overlap(w)) {
val searcher_is_older = IsOlder(lcam_ldq_idx(w), i.U, ldq_head)
when (searcher_is_older) {
when ((l_bits.executed || l_bits.succeeded || l_is_forwarding) &&
!s1_executing_loads(i) && // If the load is proceeding in parallel we don't need to kill it
l_bits.observed) { // Its only a ordering failure if the cache line was observed between the younger load and us
ldq(i).bits.order_fail := true.B
failed_loads(i) := true.B
}
} .elsewhen (lcam_ldq_idx(w) =/= i.U) {
// The load is older, and either it hasn't executed, it was nacked, or it is ignoring its response
// we need to kill ourselves, and prevent forwarding
val older_nacked = nacking_loads(i) || RegNext(nacking_loads(i))
when (!(l_bits.executed || l_bits.succeeded) || older_nacked) {
s1_set_execute(lcam_ldq_idx(w)) := false.B
io.dmem.s1_kill(w) := RegNext(dmem_req_fire(w))
can_forward(w) := false.B
}
}
}
}
}
for (i <- 0 until numStqEntries) {
val s_addr = stq(i).bits.addr.bits
val s_uop = stq(i).bits.uop
val dword_addr_matches = widthMap(w =>
( stq(i).bits.addr.valid &&
!stq(i).bits.addr_is_virtual &&
(s_addr(corePAddrBits-1,3) === lcam_addr(w)(corePAddrBits-1,3))))
val write_mask = GenByteMask(s_addr, s_uop.mem_size)
for (w <- 0 until memWidth) {
when (do_ld_search(w) && stq(i).valid && lcam_st_dep_mask(w)(i)) {
when (((lcam_mask(w) & write_mask) === lcam_mask(w)) && !s_uop.is_fence && !s_uop.is_amo && dword_addr_matches(w) && can_forward(w))
{
ldst_addr_matches(w)(i) := true.B
ldst_forward_matches(w)(i) := true.B
io.dmem.s1_kill(w) := RegNext(dmem_req_fire(w))
s1_set_execute(lcam_ldq_idx(w)) := false.B
}
.elsewhen (((lcam_mask(w) & write_mask) =/= 0.U) && dword_addr_matches(w))
{
ldst_addr_matches(w)(i) := true.B
io.dmem.s1_kill(w) := RegNext(dmem_req_fire(w))
s1_set_execute(lcam_ldq_idx(w)) := false.B
}
.elsewhen (s_uop.is_fence || s_uop.is_amo)
{
ldst_addr_matches(w)(i) := true.B
io.dmem.s1_kill(w) := RegNext(dmem_req_fire(w))
s1_set_execute(lcam_ldq_idx(w)) := false.B
}
}
}
}
// Set execute bit in LDQ
for (i <- 0 until numLdqEntries) {
when (s1_set_execute(i)) { ldq(i).bits.executed := true.B }
}
// Find the youngest store which the load is dependent on
val forwarding_age_logic = Seq.fill(memWidth) { Module(new ForwardingAgeLogic(numStqEntries)) }
for (w <- 0 until memWidth) {
forwarding_age_logic(w).io.addr_matches := ldst_addr_matches(w).asUInt
forwarding_age_logic(w).io.youngest_st_idx := lcam_uop(w).stq_idx
}
val forwarding_idx = widthMap(w => forwarding_age_logic(w).io.forwarding_idx)
// Forward if st-ld forwarding is possible from the writemask and loadmask
mem_forward_valid := widthMap(w =>
(ldst_forward_matches(w)(forwarding_idx(w)) &&
!IsKilledByBranch(io.core.brupdate, lcam_uop(w)) &&
!io.core.exception && !RegNext(io.core.exception)))
mem_forward_stq_idx := forwarding_idx
// Avoid deadlock with a 1-w LSU prioritizing load wakeups > store commits
// On a 2W machine, load wakeups and store commits occupy separate pipelines,
// so only add this logic for 1-w LSU
if (memWidth == 1) {
// Wakeups may repeatedly find a st->ld addr conflict and fail to forward,
// repeated wakeups may block the store from ever committing
// Disallow load wakeups 1 cycle after this happens to allow the stores to drain
when (RegNext(ldst_addr_matches(0).reduce(_||_) && !mem_forward_valid(0))) {
block_load_wakeup := true.B
}
// If stores remain blocked for 15 cycles, block load wakeups to get a store through
val store_blocked_counter = Reg(UInt(4.W))
when (will_fire_store_commit(0) || !can_fire_store_commit(0)) {
store_blocked_counter := 0.U
} .elsewhen (can_fire_store_commit(0) && !will_fire_store_commit(0)) {
store_blocked_counter := Mux(store_blocked_counter === 15.U, 15.U, store_blocked_counter + 1.U)
}
when (store_blocked_counter === 15.U) {
block_load_wakeup := true.B
}
}
// Task 3: Clr unsafe bit in ROB for succesful translations
// Delay this a cycle to avoid going ahead of the exception broadcast
// The unsafe bit is cleared on the first translation, so no need to fire for load wakeups
for (w <- 0 until memWidth) {
io.core.clr_unsafe(w).valid := RegNext((do_st_search(w) || do_ld_search(w)) && !fired_load_wakeup(w)) && false.B
io.core.clr_unsafe(w).bits := RegNext(lcam_uop(w).rob_idx)
}
// detect which loads get marked as failures, but broadcast to the ROB the oldest failing load
// TODO encapsulate this in an age-based priority-encoder
// val l_idx = AgePriorityEncoder((Vec(Vec.tabulate(numLdqEntries)(i => failed_loads(i) && i.U >= laq_head)
// ++ failed_loads)).asUInt)
val temp_bits = (VecInit(VecInit.tabulate(numLdqEntries)(i =>
failed_loads(i) && i.U >= ldq_head) ++ failed_loads)).asUInt
val l_idx = PriorityEncoder(temp_bits)
// one exception port, but multiple causes!
// - 1) the incoming store-address finds a faulting load (it is by definition younger)
// - 2) the incoming load or store address is excepting. It must be older and thus takes precedent.
val r_xcpt_valid = RegInit(false.B)
val r_xcpt = Reg(new Exception)
val ld_xcpt_valid = failed_loads.reduce(_|_)
val ld_xcpt_uop = ldq(Mux(l_idx >= numLdqEntries.U, l_idx - numLdqEntries.U, l_idx)).bits.uop
val use_mem_xcpt = (mem_xcpt_valid && IsOlder(mem_xcpt_uop.rob_idx, ld_xcpt_uop.rob_idx, io.core.rob_head_idx)) || !ld_xcpt_valid
val xcpt_uop = Mux(use_mem_xcpt, mem_xcpt_uop, ld_xcpt_uop)
r_xcpt_valid := (ld_xcpt_valid || mem_xcpt_valid) &&
!io.core.exception &&
!IsKilledByBranch(io.core.brupdate, xcpt_uop)
r_xcpt.uop := xcpt_uop
r_xcpt.uop.br_mask := GetNewBrMask(io.core.brupdate, xcpt_uop)
r_xcpt.cause := Mux(use_mem_xcpt, mem_xcpt_cause, MINI_EXCEPTION_MEM_ORDERING)
r_xcpt.badvaddr := mem_xcpt_vaddr // TODO is there another register we can use instead?
io.core.lxcpt.valid := r_xcpt_valid && !io.core.exception && !IsKilledByBranch(io.core.brupdate, r_xcpt.uop)
io.core.lxcpt.bits := r_xcpt
// Task 4: Speculatively wakeup loads 1 cycle before they come back
for (w <- 0 until memWidth) {
io.core.spec_ld_wakeup(w).valid := enableFastLoadUse.B &&
fired_load_incoming(w) &&
!mem_incoming_uop(w).fp_val &&
mem_incoming_uop(w).pdst =/= 0.U
io.core.spec_ld_wakeup(w).bits := mem_incoming_uop(w).pdst
}
//-------------------------------------------------------------
//-------------------------------------------------------------
// Writeback Cycle (St->Ld Forwarding Path)
//-------------------------------------------------------------
//-------------------------------------------------------------
// Handle Memory Responses and nacks
//----------------------------------
for (w <- 0 until memWidth) {
io.core.exe(w).iresp.valid := false.B
io.core.exe(w).iresp.bits := DontCare
io.core.exe(w).fresp.valid := false.B
io.core.exe(w).fresp.bits := DontCare
}
val dmem_resp_fired = WireInit(widthMap(w => false.B))
for (w <- 0 until memWidth) {
// Handle nacks
when (io.dmem.nack(w).valid)
{
// We have to re-execute this!
when (io.dmem.nack(w).bits.is_hella)
{
assert(hella_state === h_wait || hella_state === h_dead)
}
.elsewhen (io.dmem.nack(w).bits.uop.uses_ldq)
{
assert(ldq(io.dmem.nack(w).bits.uop.ldq_idx).bits.executed)
ldq(io.dmem.nack(w).bits.uop.ldq_idx).bits.executed := false.B
nacking_loads(io.dmem.nack(w).bits.uop.ldq_idx) := true.B
}
.otherwise
{
assert(io.dmem.nack(w).bits.uop.uses_stq)
when (IsOlder(io.dmem.nack(w).bits.uop.stq_idx, stq_execute_head, stq_head)) {
stq_execute_head := io.dmem.nack(w).bits.uop.stq_idx
}
}
}
// Handle the response
when (io.dmem.resp(w).valid)
{
when (io.dmem.resp(w).bits.uop.uses_ldq)
{
assert(!io.dmem.resp(w).bits.is_hella)
val ldq_idx = io.dmem.resp(w).bits.uop.ldq_idx
val send_iresp = ldq(ldq_idx).bits.uop.dst_rtype === RT_FIX
val send_fresp = ldq(ldq_idx).bits.uop.dst_rtype === RT_FLT
io.core.exe(w).iresp.bits.uop := ldq(ldq_idx).bits.uop
io.core.exe(w).fresp.bits.uop := ldq(ldq_idx).bits.uop
io.core.exe(w).iresp.valid := send_iresp
io.core.exe(w).iresp.bits.data := io.dmem.resp(w).bits.data
io.core.exe(w).fresp.valid := send_fresp
io.core.exe(w).fresp.bits.data := io.dmem.resp(w).bits.data
assert(send_iresp ^ send_fresp)
dmem_resp_fired(w) := true.B
ldq(ldq_idx).bits.succeeded := io.core.exe(w).iresp.valid || io.core.exe(w).fresp.valid
ldq(ldq_idx).bits.debug_wb_data := io.dmem.resp(w).bits.data
}
.elsewhen (io.dmem.resp(w).bits.uop.uses_stq)
{
assert(!io.dmem.resp(w).bits.is_hella)
stq(io.dmem.resp(w).bits.uop.stq_idx).bits.succeeded := true.B
when (io.dmem.resp(w).bits.uop.is_amo) {
dmem_resp_fired(w) := true.B
io.core.exe(w).iresp.valid := true.B
io.core.exe(w).iresp.bits.uop := stq(io.dmem.resp(w).bits.uop.stq_idx).bits.uop
io.core.exe(w).iresp.bits.data := io.dmem.resp(w).bits.data
stq(io.dmem.resp(w).bits.uop.stq_idx).bits.debug_wb_data := io.dmem.resp(w).bits.data
}
}
}
when (dmem_resp_fired(w) && wb_forward_valid(w))
{
// Twiddle thumbs. Can't forward because dcache response takes precedence
}
.elsewhen (!dmem_resp_fired(w) && wb_forward_valid(w))
{
val f_idx = wb_forward_ldq_idx(w)
val forward_uop = ldq(f_idx).bits.uop
val stq_e = stq(wb_forward_stq_idx(w))
val data_ready = stq_e.bits.data.valid
val live = !IsKilledByBranch(io.core.brupdate, forward_uop)
val storegen = new freechips.rocketchip.rocket.StoreGen(
stq_e.bits.uop.mem_size, stq_e.bits.addr.bits,
stq_e.bits.data.bits, coreDataBytes)
val loadgen = new freechips.rocketchip.rocket.LoadGen(
forward_uop.mem_size, forward_uop.mem_signed,
wb_forward_ld_addr(w),
storegen.data, false.B, coreDataBytes)
io.core.exe(w).iresp.valid := (forward_uop.dst_rtype === RT_FIX) && data_ready && live
io.core.exe(w).fresp.valid := (forward_uop.dst_rtype === RT_FLT) && data_ready && live
io.core.exe(w).iresp.bits.uop := forward_uop
io.core.exe(w).fresp.bits.uop := forward_uop
io.core.exe(w).iresp.bits.data := loadgen.data
io.core.exe(w).fresp.bits.data := loadgen.data
when (data_ready && live) {
ldq(f_idx).bits.succeeded := data_ready
ldq(f_idx).bits.forward_std_val := true.B
ldq(f_idx).bits.forward_stq_idx := wb_forward_stq_idx(w)
ldq(f_idx).bits.debug_wb_data := loadgen.data
}
}
}
// Initially assume the speculative load wakeup failed
io.core.ld_miss := RegNext(io.core.spec_ld_wakeup.map(_.valid).reduce(_||_))
val spec_ld_succeed = widthMap(w =>
!RegNext(io.core.spec_ld_wakeup(w).valid) ||
(io.core.exe(w).iresp.valid &&
io.core.exe(w).iresp.bits.uop.ldq_idx === RegNext(mem_incoming_uop(w).ldq_idx)
)
).reduce(_&&_)
when (spec_ld_succeed) {
io.core.ld_miss := false.B
}
//-------------------------------------------------------------
// Kill speculated entries on branch mispredict
//-------------------------------------------------------------
//-------------------------------------------------------------
// Kill stores
val st_brkilled_mask = Wire(Vec(numStqEntries, Bool()))
for (i <- 0 until numStqEntries)
{
st_brkilled_mask(i) := false.B
when (stq(i).valid)
{
stq(i).bits.uop.br_mask := GetNewBrMask(io.core.brupdate, stq(i).bits.uop.br_mask)
when (IsKilledByBranch(io.core.brupdate, stq(i).bits.uop))
{
stq(i).valid := false.B
stq(i).bits.addr.valid := false.B
stq(i).bits.data.valid := false.B
st_brkilled_mask(i) := true.B
}
}
assert (!(IsKilledByBranch(io.core.brupdate, stq(i).bits.uop) && stq(i).valid && stq(i).bits.committed),
"Branch is trying to clear a committed store.")
}
// Kill loads
for (i <- 0 until numLdqEntries)
{
when (ldq(i).valid)
{
ldq(i).bits.uop.br_mask := GetNewBrMask(io.core.brupdate, ldq(i).bits.uop.br_mask)
when (IsKilledByBranch(io.core.brupdate, ldq(i).bits.uop))
{
ldq(i).valid := false.B
ldq(i).bits.addr.valid := false.B
}
}
}
//-------------------------------------------------------------
when (io.core.brupdate.b2.mispredict && !io.core.exception)
{
stq_tail := io.core.brupdate.b2.uop.stq_idx
ldq_tail := io.core.brupdate.b2.uop.ldq_idx
}
//-------------------------------------------------------------
//-------------------------------------------------------------
// dequeue old entries on commit
//-------------------------------------------------------------
//-------------------------------------------------------------
var temp_stq_commit_head = stq_commit_head
var temp_ldq_head = ldq_head
for (w <- 0 until coreWidth)
{
val commit_store = io.core.commit.valids(w) && io.core.commit.uops(w).uses_stq
val commit_load = io.core.commit.valids(w) && io.core.commit.uops(w).uses_ldq
val idx = Mux(commit_store, temp_stq_commit_head, temp_ldq_head)
when (commit_store)
{
stq(idx).bits.committed := true.B
} .elsewhen (commit_load) {
assert (ldq(idx).valid, "[lsu] trying to commit an un-allocated load entry.")
assert ((ldq(idx).bits.executed || ldq(idx).bits.forward_std_val) && ldq(idx).bits.succeeded ,
"[lsu] trying to commit an un-executed load entry.")
ldq(idx).valid := false.B
ldq(idx).bits.addr.valid := false.B
ldq(idx).bits.executed := false.B
ldq(idx).bits.succeeded := false.B
ldq(idx).bits.order_fail := false.B
ldq(idx).bits.forward_std_val := false.B
}
if (MEMTRACE_PRINTF) {
when (commit_store || commit_load) {
val uop = Mux(commit_store, stq(idx).bits.uop, ldq(idx).bits.uop)
val addr = Mux(commit_store, stq(idx).bits.addr.bits, ldq(idx).bits.addr.bits)
val stdata = Mux(commit_store, stq(idx).bits.data.bits, 0.U)
val wbdata = Mux(commit_store, stq(idx).bits.debug_wb_data, ldq(idx).bits.debug_wb_data)
printf("MT %x %x %x %x %x %x %x\n",
io.core.tsc_reg, uop.uopc, uop.mem_cmd, uop.mem_size, addr, stdata, wbdata)
}
}
temp_stq_commit_head = Mux(commit_store,
WrapInc(temp_stq_commit_head, numStqEntries),
temp_stq_commit_head)
temp_ldq_head = Mux(commit_load,
WrapInc(temp_ldq_head, numLdqEntries),
temp_ldq_head)
}
stq_commit_head := temp_stq_commit_head
ldq_head := temp_ldq_head
// store has been committed AND successfully sent data to memory
when (stq(stq_head).valid && stq(stq_head).bits.committed)
{
when (stq(stq_head).bits.uop.is_fence && !io.dmem.ordered) {
io.dmem.force_order := true.B
store_needs_order := true.B
}
clear_store := Mux(stq(stq_head).bits.uop.is_fence, io.dmem.ordered,
stq(stq_head).bits.succeeded)
}
when (clear_store)
{
stq(stq_head).valid := false.B
stq(stq_head).bits.addr.valid := false.B
stq(stq_head).bits.data.valid := false.B
stq(stq_head).bits.succeeded := false.B
stq(stq_head).bits.committed := false.B
stq_head := WrapInc(stq_head, numStqEntries)
when (stq(stq_head).bits.uop.is_fence)
{
stq_execute_head := WrapInc(stq_execute_head, numStqEntries)
}
}
// -----------------------
// Hellacache interface
// We need to time things like a HellaCache would
io.hellacache.req.ready := false.B
io.hellacache.s2_nack := false.B
io.hellacache.s2_xcpt := (0.U).asTypeOf(new rocket.HellaCacheExceptions)
io.hellacache.resp.valid := false.B
io.hellacache.store_pending := stq.map(_.valid).reduce(_||_)
when (hella_state === h_ready) {
io.hellacache.req.ready := true.B
when (io.hellacache.req.fire) {
hella_req := io.hellacache.req.bits
hella_state := h_s1
}
} .elsewhen (hella_state === h_s1) {
can_fire_hella_incoming(memWidth-1) := true.B
hella_data := io.hellacache.s1_data
hella_xcpt := dtlb.io.resp(memWidth-1)
when (io.hellacache.s1_kill) {
when (will_fire_hella_incoming(memWidth-1) && dmem_req_fire(memWidth-1)) {
hella_state := h_dead
} .otherwise {
hella_state := h_ready
}
} .elsewhen (will_fire_hella_incoming(memWidth-1) && dmem_req_fire(memWidth-1)) {
hella_state := h_s2
} .otherwise {
hella_state := h_s2_nack
}
} .elsewhen (hella_state === h_s2_nack) {
io.hellacache.s2_nack := true.B
hella_state := h_ready
} .elsewhen (hella_state === h_s2) {
io.hellacache.s2_xcpt := hella_xcpt
when (io.hellacache.s2_kill || hella_xcpt.asUInt =/= 0.U) {
hella_state := h_dead
} .otherwise {
hella_state := h_wait
}
} .elsewhen (hella_state === h_wait) {
for (w <- 0 until memWidth) {
when (io.dmem.resp(w).valid && io.dmem.resp(w).bits.is_hella) {
hella_state := h_ready
io.hellacache.resp.valid := true.B
io.hellacache.resp.bits.addr := hella_req.addr
io.hellacache.resp.bits.tag := hella_req.tag
io.hellacache.resp.bits.cmd := hella_req.cmd
io.hellacache.resp.bits.signed := hella_req.signed
io.hellacache.resp.bits.size := hella_req.size
io.hellacache.resp.bits.data := io.dmem.resp(w).bits.data
} .elsewhen (io.dmem.nack(w).valid && io.dmem.nack(w).bits.is_hella) {
hella_state := h_replay
}
}
} .elsewhen (hella_state === h_replay) {
can_fire_hella_wakeup(memWidth-1) := true.B
when (will_fire_hella_wakeup(memWidth-1) && dmem_req_fire(memWidth-1)) {
hella_state := h_wait
}
} .elsewhen (hella_state === h_dead) {
for (w <- 0 until memWidth) {
when (io.dmem.resp(w).valid && io.dmem.resp(w).bits.is_hella) {
hella_state := h_ready
}
}
}
//-------------------------------------------------------------
// Exception / Reset
// for the live_store_mask, need to kill stores that haven't been committed
val st_exc_killed_mask = WireInit(VecInit((0 until numStqEntries).map(x=>false.B)))
when (reset.asBool || io.core.exception)
{
ldq_head := 0.U
ldq_tail := 0.U
when (reset.asBool)
{
stq_head := 0.U
stq_tail := 0.U
stq_commit_head := 0.U
stq_execute_head := 0.U
for (i <- 0 until numStqEntries)
{
stq(i).valid := false.B
stq(i).bits.addr.valid := false.B
stq(i).bits.data.valid := false.B
stq(i).bits.uop := NullMicroOp
}
}
.otherwise // exception
{
stq_tail := stq_commit_head
for (i <- 0 until numStqEntries)
{
when (!stq(i).bits.committed && !stq(i).bits.succeeded)
{
stq(i).valid := false.B
stq(i).bits.addr.valid := false.B
stq(i).bits.data.valid := false.B
st_exc_killed_mask(i) := true.B
}
}
}
for (i <- 0 until numLdqEntries)
{
ldq(i).valid := false.B
ldq(i).bits.addr.valid := false.B
ldq(i).bits.executed := false.B
}
}
//-------------------------------------------------------------
// Live Store Mask
// track a bit-array of stores that are alive
// (could maybe be re-produced from the stq_head/stq_tail, but need to know include spec_killed entries)
// TODO is this the most efficient way to compute the live store mask?
live_store_mask := next_live_store_mask &
~(st_brkilled_mask.asUInt) &
~(st_exc_killed_mask.asUInt)
}
/**
* Object to take an address and generate an 8-bit mask of which bytes within a
* double-word.
*/
object GenByteMask
{
def apply(addr: UInt, size: UInt): UInt =
{
val mask = Wire(UInt(8.W))
mask := MuxCase(255.U(8.W), Array(
(size === 0.U) -> (1.U(8.W) << addr(2,0)),
(size === 1.U) -> (3.U(8.W) << (addr(2,1) << 1.U)),
(size === 2.U) -> Mux(addr(2), 240.U(8.W), 15.U(8.W)),
(size === 3.U) -> 255.U(8.W)))
mask
}
}
/**
* ...
*/
class ForwardingAgeLogic(num_entries: Int)(implicit p: Parameters) extends BoomModule()(p)
{
val io = IO(new Bundle
{
val addr_matches = Input(UInt(num_entries.W)) // bit vector of addresses that match
// between the load and the SAQ
val youngest_st_idx = Input(UInt(stqAddrSz.W)) // needed to get "age"
val forwarding_val = Output(Bool())
val forwarding_idx = Output(UInt(stqAddrSz.W))
})
// generating mask that zeroes out anything younger than tail
val age_mask = Wire(Vec(num_entries, Bool()))
for (i <- 0 until num_entries)
{
age_mask(i) := true.B
when (i.U >= io.youngest_st_idx) // currently the tail points PAST last store, so use >=
{
age_mask(i) := false.B
}
}
// Priority encoder with moving tail: double length
val matches = Wire(UInt((2*num_entries).W))
matches := Cat(io.addr_matches & age_mask.asUInt,
io.addr_matches)
val found_match = Wire(Bool())
found_match := false.B
io.forwarding_idx := 0.U
// look for youngest, approach from the oldest side, let the last one found stick
for (i <- 0 until (2*num_entries))
{
when (matches(i))
{
found_match := true.B
io.forwarding_idx := (i % num_entries).U
}
}
io.forwarding_val := found_match
} | module ForwardingAgeLogic(
input [7:0] io_addr_matches,
input [2:0] io_youngest_st_idx,
output [2:0] io_forwarding_idx
);
wire [7:0] _matches_T_1 = io_addr_matches & {1'h0, &io_youngest_st_idx, io_youngest_st_idx > 3'h5, io_youngest_st_idx > 3'h4, io_youngest_st_idx[2], io_youngest_st_idx > 3'h2, |(io_youngest_st_idx[2:1]), |io_youngest_st_idx};
assign io_forwarding_idx = _matches_T_1[7] ? 3'h7 : _matches_T_1[6] ? 3'h6 : _matches_T_1[5] ? 3'h5 : _matches_T_1[4] ? 3'h4 : _matches_T_1[3] ? 3'h3 : _matches_T_1[2] ? 3'h2 : _matches_T_1[1] ? 3'h1 : _matches_T_1[0] ? 3'h0 : io_addr_matches[7] ? 3'h7 : io_addr_matches[6] ? 3'h6 : io_addr_matches[5] ? 3'h5 : io_addr_matches[4] ? 3'h4 : io_addr_matches[3] ? 3'h3 : io_addr_matches[2] ? 3'h2 : {2'h0, io_addr_matches[1]};
endmodule |
Generate the Verilog code corresponding to this Chisel code /*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
/**
 * Rounds a "raw" (fully unpacked) floating-point value of size
 * (inExpWidth, inSigWidth) into the recoded format of size
 * (outExpWidth, outSigWidth), applying the requested IEEE rounding mode and
 * producing the five standard exception flags
 * {invalid, infinite, overflow, underflow, inexact}.
 *
 * 'options' is a bit mask (see consts) that lets an instantiation prove away
 * hardware: e.g. that the significand MSB is always zero, or that
 * under-/overflow can never occur.  'io.invalidExc' overrides everything and
 * yields a NaN; 'io.infiniteExc' overrides 'io.in' except for its sign.
 */
class
    RoundAnyRawFNToRecFN(
        inExpWidth: Int,
        inSigWidth: Int,
        outExpWidth: Int,
        outSigWidth: Int,
        options: Int
    )
    extends RawModule
{
    override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
    val io = IO(new Bundle {
        val invalidExc = Input(Bool())   // overrides 'infiniteExc' and 'in'
        val infiniteExc = Input(Bool())  // overrides 'in' except for 'in.sign'
        val in = Input(new RawFloat(inExpWidth, inSigWidth))
            // (allowed exponent range has limits)
        val roundingMode = Input(UInt(3.W))
        val detectTininess = Input(UInt(1.W))
        val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
        val exceptionFlags = Output(Bits(5.W))
    })
    //------------------------------------------------------------------------
    //------------------------------------------------------------------------
    // Elaboration-time facts derived from the widths and the 'options' mask;
    // these statically prune the rounding hardware below.
    val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
    val effectiveInSigWidth =
        if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
    val neverUnderflows =
        ((options &
              (flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
         ) != 0) ||
        (inExpWidth < outExpWidth)
    val neverOverflows =
        ((options & flRoundOpt_neverOverflows) != 0) ||
        (inExpWidth < outExpWidth)
    // Key exponent encodings of the recoded output format.
    val outNaNExp = BigInt(7)<<(outExpWidth - 2)
    val outInfExp = BigInt(6)<<(outExpWidth - 2)
    val outMaxFiniteExp = outInfExp - 1
    val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
    val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
    //------------------------------------------------------------------------
    //------------------------------------------------------------------------
    // Decode the rounding mode once.  'roundMagUp' is true when the mode
    // rounds away from zero for this operand's sign (round_min on negatives,
    // round_max on positives).
    val roundingMode_near_even = (io.roundingMode === round_near_even)
    val roundingMode_minMag = (io.roundingMode === round_minMag)
    val roundingMode_min = (io.roundingMode === round_min)
    val roundingMode_max = (io.roundingMode === round_max)
    val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
    val roundingMode_odd = (io.roundingMode === round_odd)
    val roundMagUp =
        (roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
    //------------------------------------------------------------------------
    //------------------------------------------------------------------------
    // Rebias the exponent into the output format, and align the significand
    // to outSigWidth + 2 bits: the 2 extra low-order bits are the round bit
    // and the sticky bit (the OR-reduced discarded tail when narrowing).
    val sAdjustedExp =
        if (inExpWidth < outExpWidth)
            (io.in.sExp +&
                 ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
            )(outExpWidth, 0).zext
        else if (inExpWidth == outExpWidth)
            io.in.sExp
        else
            io.in.sExp +&
                ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
    val adjustedSig =
        if (inSigWidth <= outSigWidth + 2)
            io.in.sig<<(outSigWidth - inSigWidth + 2)
        else
            (io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
                 io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
            )
    // Set when the significand came in with its MS bit high and must be
    // shifted right one place (statically false if the MSB is provably zero).
    val doShiftSigDown1 =
        if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
    val common_expOut = Wire(UInt((outExpWidth + 1).W))
    val common_fractOut = Wire(UInt((outSigWidth - 1).W))
    val common_overflow = Wire(Bool())
    val common_totalUnderflow = Wire(Bool())
    val common_underflow = Wire(Bool())
    val common_inexact = Wire(Bool())
    if (
        neverOverflows && neverUnderflows
            && (effectiveInSigWidth <= outSigWidth)
    ) {
        //--------------------------------------------------------------------
        //--------------------------------------------------------------------
        // Exact path: the input always fits in the output format, so no
        // rounding logic is needed and no flags can be raised.
        common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
        common_fractOut :=
            Mux(doShiftSigDown1,
                adjustedSig(outSigWidth + 1, 3),
                adjustedSig(outSigWidth, 2)
            )
        common_overflow := false.B
        common_totalUnderflow := false.B
        common_underflow := false.B
        common_inexact := false.B
    } else {
        //--------------------------------------------------------------------
        //--------------------------------------------------------------------
        // General path.  'roundMask' marks every significand bit that will be
        // discarded (widened toward the subnormal range via 'lowMask' when
        // underflow is possible); 'roundPosMask' isolates the round bit.
        val roundMask =
            if (neverUnderflows)
                0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
            else
                (lowMask(
                     sAdjustedExp(outExpWidth, 0),
                     outMinNormExp - outSigWidth - 1,
                     outMinNormExp
                 ) | doShiftSigDown1) ##
                    3.U(2.W)
        val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
        val roundPosMask = ~shiftedRoundMask & roundMask
        val roundPosBit = (adjustedSig & roundPosMask).orR
        val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
        val anyRound = roundPosBit || anyRoundExtra
        // Increment when nearest modes see the round bit set, or when the
        // mode rounds away from zero and any discarded bit is set.
        val roundIncr =
            ((roundingMode_near_even || roundingMode_near_maxMag) &&
                 roundPosBit) ||
                (roundMagUp && anyRound)
        // On increment, ties-to-even clears the LSB when exactly halfway
        // (round bit set, nothing below).  round_odd jams the LSB instead.
        val roundedSig: Bits =
            Mux(roundIncr,
                (((adjustedSig | roundMask)>>2) +& 1.U) &
                    ~Mux(roundingMode_near_even && roundPosBit &&
                             ! anyRoundExtra,
                         roundMask>>1,
                         0.U((outSigWidth + 2).W)
                     ),
                (adjustedSig & ~roundMask)>>2 |
                    Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
            )
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
        // A carry out of the rounded significand bumps the exponent.
        val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
        common_expOut := sRoundedExp(outExpWidth, 0)
        common_fractOut :=
            Mux(doShiftSigDown1,
                roundedSig(outSigWidth - 1, 1),
                roundedSig(outSigWidth - 2, 0)
            )
        common_overflow :=
            (if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
                 (sRoundedExp>>(outExpWidth - 1) >= 3.S))
        common_totalUnderflow :=
            (if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
                 (sRoundedExp < outMinNonzeroExp.S))
        // "Unbounded-range" versions ignore the subnormal widening of the
        // round mask; used for tininess detection after rounding.
        val unboundedRange_roundPosBit =
            Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
        val unboundedRange_anyRound =
            (doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
        val unboundedRange_roundIncr =
            ((roundingMode_near_even || roundingMode_near_maxMag) &&
                 unboundedRange_roundPosBit) ||
                (roundMagUp && unboundedRange_anyRound)
        val roundCarry =
            Mux(doShiftSigDown1,
                roundedSig(outSigWidth + 1),
                roundedSig(outSigWidth)
            )
        // Underflow: inexact subnormal result, qualified by the selected
        // tininess-detection mode (before vs. after rounding).
        common_underflow :=
            (if (neverUnderflows) false.B else
                 common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
                     (anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
                          Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
                          ! ((io.detectTininess === tininess_afterRounding) &&
                                 ! Mux(doShiftSigDown1,
                                       roundMask(4),
                                       roundMask(3)
                                   ) &&
                                 roundCarry && roundPosBit &&
                                 unboundedRange_roundIncr)))
        common_inexact := common_totalUnderflow || anyRound
    }
    //------------------------------------------------------------------------
    //------------------------------------------------------------------------
    // Special-case resolution: NaN/Inf/zero override the common path, and
    // overflow/total-underflow results are "pegged" to max-finite,
    // min-nonzero, or infinity depending on the rounding direction.
    val isNaNOut = io.invalidExc || io.in.isNaN
    val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
    val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
    val overflow = commonCase && common_overflow
    val underflow = commonCase && common_underflow
    val inexact = overflow || (commonCase && common_inexact)
    val overflow_roundMagUp =
        roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
    val pegMinNonzeroMagOut =
        commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
    val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
    val notNaN_isInfOut =
        notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
    val signOut = Mux(isNaNOut, false.B, io.in.sign)
    // Compose the output exponent by masking off the common-case exponent in
    // the special cases and then OR-ing in the pegged/special encodings.
    val expOut =
        (common_expOut &
             ~Mux(io.in.isZero || common_totalUnderflow,
                  (BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
                  0.U
              ) &
             ~Mux(pegMinNonzeroMagOut,
                  ~outMinNonzeroExp.U((outExpWidth + 1).W),
                  0.U
              ) &
             ~Mux(pegMaxFiniteMagOut,
                  (BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
                  0.U
              ) &
             ~Mux(notNaN_isInfOut,
                  (BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
                  0.U
              )) |
            Mux(pegMinNonzeroMagOut,
                outMinNonzeroExp.U((outExpWidth + 1).W),
                0.U
            ) |
            Mux(pegMaxFiniteMagOut,
                outMaxFiniteExp.U((outExpWidth + 1).W),
                0.U
            ) |
            Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
            Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
    // Fraction: quiet-NaN pattern for NaN, zero for zero/total underflow,
    // all-ones when pegged to the largest finite magnitude.
    val fractOut =
        Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
            Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
            common_fractOut
        ) |
        Fill(outSigWidth - 1, pegMaxFiniteMagOut)
    io.out := signOut ## expOut ## fractOut
    io.exceptionFlags :=
        io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
/**
 * Same-size rounding wrapper: rounds a raw float that carries 2 extra
 * low-order significand bits (round and sticky) back into the recoded
 * format of the same (expWidth, sigWidth).  All work is delegated to a
 * RoundAnyRawFNToRecFN instance with matching input/output widths.
 */
class
    RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
    extends RawModule
{
    override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
    val io = IO(new Bundle {
        val invalidExc = Input(Bool())   // overrides 'infiniteExc' and 'in'
        val infiniteExc = Input(Bool())  // overrides 'in' except for 'in.sign'
        val in = Input(new RawFloat(expWidth, sigWidth + 2))
        val roundingMode = Input(UInt(3.W))
        val detectTininess = Input(UInt(1.W))
        val out = Output(Bits((expWidth + sigWidth + 1).W))
        val exceptionFlags = Output(Bits(5.W))
    })
    // Straight pass-through wiring to the generic rounder.
    val roundAnyRawFNToRecFN =
        Module(
            new RoundAnyRawFNToRecFN(
                expWidth, sigWidth + 2, expWidth, sigWidth, options))
    roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
    roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
    roundAnyRawFNToRecFN.io.in := io.in
    roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
    roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
    io.out := roundAnyRawFNToRecFN.io.out
    io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
} | module RoundRawFNToRecFN_e5_s11(
  // Half-precision (e5, s11) instance.  NOTE(review): firtool-generated from
  // the Chisel RoundRawFNToRecFN above — do not hand-edit; io_detectTininess
  // and io_infiniteExc logic were constant-folded where unused by the parent.
  input io_invalidExc,
  input io_infiniteExc,
  input io_in_isNaN,
  input io_in_isInf,
  input io_in_isZero,
  input io_in_sign,
  input [6:0] io_in_sExp,
  input [13:0] io_in_sig,
  input [2:0] io_roundingMode,
  output [16:0] io_out,
  output [4:0] io_exceptionFlags
);

  // Pure pass-through wrapper: all rounding happens in this child instance.
  RoundAnyRawFNToRecFN_ie5_is13_oe5_os11 roundAnyRawFNToRecFN (
    .io_invalidExc (io_invalidExc),
    .io_infiniteExc (io_infiniteExc),
    .io_in_isNaN (io_in_isNaN),
    .io_in_isInf (io_in_isInf),
    .io_in_isZero (io_in_isZero),
    .io_in_sign (io_in_sign),
    .io_in_sExp (io_in_sExp),
    .io_in_sig (io_in_sig),
    .io_roundingMode (io_roundingMode),
    .io_out (io_out),
    .io_exceptionFlags (io_exceptionFlags)
  );
endmodule |
Generate the Verilog code corresponding to this Chisel code /*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.util._
/** Elaboration-time configuration for a ListBuffer: the payload type 'gen',
 *  the number of independent logical queues, the number of shared storage
 *  entries, and whether a push is combinationally visible on the pop side.
 */
case class ListBufferParameters[T <: Data](gen: T, queues: Int, entries: Int, bypass: Boolean)
{
  // Address widths needed to select a queue and a storage entry, respectively.
  val queueBits = log2Up(queues)
  val entryBits = log2Up(entries)
}
/** Push-port payload: which logical queue to append to, plus the data. */
class ListBufferPush[T <: Data](params: ListBufferParameters[T]) extends Bundle
{
  val index = UInt(params.queueBits.W) // target queue
  val data = Output(params.gen)        // payload written into the entry
}
/** Multiple FIFO queues sharing one entry pool.  Each queue is a linked list
 *  threaded through the shared 'next' memory; 'head'/'tail' hold per-queue
 *  list ends, and the 'valid'/'used' bitmaps track non-empty queues and
 *  allocated entries.  Push and pop of the same queue in one cycle is
 *  supported (see the head-update below); with params.bypass, a push to an
 *  empty queue is combinationally visible on the pop side the same cycle.
 */
class ListBuffer[T <: Data](params: ListBufferParameters[T]) extends Module
{
  override def desiredName = s"ListBuffer_${params.gen.typeName}_q${params.queues}_e${params.entries}"
  val io = IO(new Bundle {
    // push is visible on the same cycle; flow queues
    val push = Flipped(Decoupled(new ListBufferPush(params)))
    val valid = UInt(params.queues.W)
    val pop = Flipped(Valid(UInt(params.queueBits.W)))
    val data = Output(params.gen)
  })
  val valid = RegInit(0.U(params.queues.W)) // per-queue non-empty bitmap
  val head = Mem(params.queues, UInt(params.entryBits.W))
  val tail = Mem(params.queues, UInt(params.entryBits.W))
  val used = RegInit(0.U(params.entries.W)) // per-entry allocated bitmap
  val next = Mem(params.entries, UInt(params.entryBits.W))
  val data = Mem(params.entries, params.gen)
  // One-hot select of the lowest-numbered free entry, and its binary index.
  val freeOH = ~(leftOR(~used) << 1) & ~used
  val freeIdx = OHToUInt(freeOH)
  // Single-cycle set/clear requests folded into 'valid'/'used' at the bottom.
  val valid_set = WireDefault(0.U(params.queues.W))
  val valid_clr = WireDefault(0.U(params.queues.W))
  val used_set = WireDefault(0.U(params.entries.W))
  val used_clr = WireDefault(0.U(params.entries.W))
  val push_tail = tail.read(io.push.bits.index)
  val push_valid = valid(io.push.bits.index)
  // Ready as long as at least one entry is free.
  io.push.ready := !used.andR
  when (io.push.fire) {
    valid_set := UIntToOH(io.push.bits.index, params.queues)
    used_set := freeOH
    data.write(freeIdx, io.push.bits.data)
    // Append to the list: link from the old tail, or start a new list.
    when (push_valid) {
      next.write(push_tail, freeIdx)
    } .otherwise {
      head.write(io.push.bits.index, freeIdx)
    }
    tail.write(io.push.bits.index, freeIdx)
  }
  val pop_head = head.read(io.pop.bits)
  val pop_valid = valid(io.pop.bits)
  // Bypass push data to the peek port
  io.data := (if (!params.bypass) data.read(pop_head) else Mux(!pop_valid, io.push.bits.data, data.read(pop_head)))
  io.valid := (if (!params.bypass) valid else (valid | valid_set))
  // It is an error to pop something that is not valid
  assert (!io.pop.fire || (io.valid)(io.pop.bits))
  when (io.pop.fire) {
    used_clr := UIntToOH(pop_head, params.entries)
    when (pop_head === tail.read(io.pop.bits)) {
      valid_clr := UIntToOH(io.pop.bits, params.queues)
    }
    // Advance head; if this cycle's push appended right behind the popped
    // head, the freshly allocated entry becomes the new head.
    head.write(io.pop.bits, Mux(io.push.fire && push_valid && push_tail === pop_head, freeIdx, next.read(pop_head)))
  }
  // Empty bypass changes no state
  when ((!params.bypass).B || !io.pop.valid || pop_valid) {
    used := (used & ~used_clr) | used_set
    valid := (valid & ~valid_clr) | valid_set
  }
} | module ListBuffer_PutBufferCEntry_q2_e16(
  // NOTE(review): firtool-generated ListBuffer instance (2 queues, 16 entries,
  // 65-bit payload = {corrupt, data}).  Comments for review only — regenerate
  // from the Chisel source rather than hand-editing.
  input clock,
  input reset,
  output io_push_ready,
  input io_push_valid,
  input io_push_bits_index,
  input [63:0] io_push_bits_data_data,
  input io_push_bits_data_corrupt,
  output [1:0] io_valid,
  input io_pop_valid,
  input io_pop_bits,
  output [63:0] io_data_data,
  output io_data_corrupt
);

  wire [64:0] _data_ext_R0_data;
  wire [3:0] _next_ext_R0_data;
  wire [3:0] _tail_ext_R0_data;
  wire [3:0] _tail_ext_R1_data;
  wire [3:0] _head_ext_R0_data;
  reg [1:0] valid;
  reg [15:0] used;
  // leftOR-based scan producing freeIdx_lo, the one-hot of the lowest free
  // entry, then a log2 reduction of that one-hot into data_MPORT_addr.
  wire [15:0] _freeOH_T_16 = ~used;
  wire [14:0] _freeOH_T_3 = _freeOH_T_16[14:0] | {_freeOH_T_16[13:0], 1'h0};
  wire [14:0] _freeOH_T_6 = _freeOH_T_3 | {_freeOH_T_3[12:0], 2'h0};
  wire [14:0] _freeOH_T_9 = _freeOH_T_6 | {_freeOH_T_6[10:0], 4'h0};
  wire [15:0] freeIdx_lo = {~(_freeOH_T_9 | {_freeOH_T_9[6:0], 8'h0}), 1'h1} & _freeOH_T_16;
  wire [6:0] _freeIdx_T_3 = freeIdx_lo[15:9] | freeIdx_lo[7:1];
  wire [2:0] _freeIdx_T_5 = _freeIdx_T_3[6:4] | _freeIdx_T_3[2:0];
  wire _freeIdx_T_7 = _freeIdx_T_5[2] | _freeIdx_T_5[0];
  wire [1:0] _GEN = {1'h0, io_push_bits_index};
  wire [1:0] _push_valid_T = valid >> _GEN;
  // Ready while any entry is free; a fired push enables all write ports.
  wire io_push_ready_0 = used != 16'hFFFF;
  wire data_MPORT_en = io_push_ready_0 & io_push_valid;
  wire [3:0] data_MPORT_addr = {|(freeIdx_lo[15:8]), |(_freeIdx_T_3[6:3]), |(_freeIdx_T_5[2:1]), _freeIdx_T_7};
  wire [1:0] _GEN_0 = {1'h0, io_pop_bits};
  // 'valid' / 'used' bitmap update: clear on pop (queue-empty / entry-freed),
  // set on push (queue non-empty / entry allocated).
  always @(posedge clock) begin
    if (reset) begin
      valid <= 2'h0;
      used <= 16'h0;
    end
    else begin
      valid <= valid & ~(io_pop_valid & _head_ext_R0_data == _tail_ext_R1_data ? 2'h1 << _GEN_0 : 2'h0) | (data_MPORT_en ? 2'h1 << _GEN : 2'h0);
      used <= used & ~(io_pop_valid ? 16'h1 << _head_ext_R0_data : 16'h0) | (data_MPORT_en ? freeIdx_lo : 16'h0);
    end
  end
  // Per-queue list-head memory (W0: pop advances head, W1: push to empty queue).
  head_2x4 head_ext (
    .R0_addr (io_pop_bits),
    .R0_en (1'h1),
    .R0_clk (clock),
    .R0_data (_head_ext_R0_data),
    .W0_addr (io_pop_bits),
    .W0_en (io_pop_valid),
    .W0_clk (clock),
    .W0_data (data_MPORT_en & _push_valid_T[0] & _tail_ext_R0_data == _head_ext_R0_data ? {|(freeIdx_lo[15:8]), |(_freeIdx_T_3[6:3]), |(_freeIdx_T_5[2:1]), _freeIdx_T_7} : _next_ext_R0_data),
    .W1_addr (io_push_bits_index),
    .W1_en (data_MPORT_en & ~(_push_valid_T[0])),
    .W1_clk (clock),
    .W1_data (data_MPORT_addr)
  );
  // Per-queue list-tail memory; every push rewrites the tail.
  tail_2x4 tail_ext (
    .R0_addr (io_push_bits_index),
    .R0_en (1'h1),
    .R0_clk (clock),
    .R0_data (_tail_ext_R0_data),
    .R1_addr (io_pop_bits),
    .R1_en (io_pop_valid),
    .R1_clk (clock),
    .R1_data (_tail_ext_R1_data),
    .W0_addr (io_push_bits_index),
    .W0_en (data_MPORT_en),
    .W0_clk (clock),
    .W0_data (data_MPORT_addr)
  );
  // Linked-list 'next' pointers: push to a non-empty queue links old tail -> new entry.
  next_16x4 next_ext (
    .R0_addr (_head_ext_R0_data),
    .R0_en (io_pop_valid),
    .R0_clk (clock),
    .R0_data (_next_ext_R0_data),
    .W0_addr (_tail_ext_R0_data),
    .W0_en (data_MPORT_en & _push_valid_T[0]),
    .W0_clk (clock),
    .W0_data (data_MPORT_addr)
  );
  // Payload storage: read at the popped queue's head, written at the free slot.
  data_16x65 data_ext (
    .R0_addr (_head_ext_R0_data),
    .R0_en (1'h1),
    .R0_clk (clock),
    .R0_data (_data_ext_R0_data),
    .W0_addr (data_MPORT_addr),
    .W0_en (data_MPORT_en),
    .W0_clk (clock),
    .W0_data ({io_push_bits_data_corrupt, io_push_bits_data_data})
  );
  assign io_push_ready = io_push_ready_0;
  assign io_valid = valid;
  assign io_data_data = _data_ext_R0_data[63:0];
  assign io_data_corrupt = _data_ext_R0_data[64];
endmodule |
Generate the Verilog code corresponding to this Chisel code //******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Issue Slot Logic
//--------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Note: stores (and AMOs) are "broken down" into 2 uops, but stored within a single issue-slot.
// TODO XXX make a separate issueSlot for MemoryIssueSlots, and only they break apart stores.
// TODO Disable ldspec for FP queue.
package boom.v3.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v3.common._
import boom.v3.util._
import FUConstants._
/**
* IO bundle to interact with Issue slot
*
* @param numWakeupPorts number of wakeup ports for the slot
*/
class IssueSlotIO(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomBundle
{
  val valid = Output(Bool())         // slot currently holds a uop
  val will_be_valid = Output(Bool()) // TODO code review, do we need this signal so explicitly?
  val request = Output(Bool())       // operands ready; requesting to issue
  val request_hp = Output(Bool())    // high-priority request (branch/jal/jalr uop)
  val grant = Input(Bool())          // issue logic granted this slot's request
  val brupdate = Input(new BrUpdateInfo())
  val kill = Input(Bool()) // pipeline flush
  val clear = Input(Bool()) // entry being moved elsewhere (not mutually exclusive with grant)
  val ldspec_miss = Input(Bool()) // Previous cycle's speculative load wakeup was mispredicted.
  val wakeup_ports = Flipped(Vec(numWakeupPorts, Valid(new IqWakeup(maxPregSz)))) // pdst wakeups
  val pred_wakeup_port = Flipped(Valid(UInt(log2Ceil(ftqSz).W))) // predicate (SFB) wakeup
  val spec_ld_wakeup = Flipped(Vec(memWidth, Valid(UInt(width=maxPregSz.W)))) // speculative load wakeups
  val in_uop = Flipped(Valid(new MicroOp())) // if valid, this WILL overwrite an entry!
  val out_uop = Output(new MicroOp()) // the updated slot uop; will be shifted upwards in a collapsing queue.
  val uop = Output(new MicroOp()) // the current Slot's uop. Sent down the pipeline when issued.
  // Debug visibility into the slot's operand-ready bits and FSM state.
  val debug = {
    val result = new Bundle {
      val p1 = Bool()    // operand 1 ready
      val p2 = Bool()    // operand 2 ready
      val p3 = Bool()    // operand 3 ready
      val ppred = Bool() // predicate operand ready
      val state = UInt(width=2.W)
    }
    Output(result)
  }
}
/**
* Single issue slot. Holds a uop within the issue queue
*
* @param numWakeupPorts number of wakeup ports
*/
class IssueSlot(val numWakeupPorts: Int)(implicit p: Parameters)
  extends BoomModule
  with IssueUnitConstants
{
  val io = IO(new IssueSlotIO(numWakeupPorts))
  // FSM states (from IssueUnitConstants):
  // slot invalid?
  // slot is valid, holding 1 uop
  // slot is valid, holds 2 uops (like a store)
  def is_invalid = state === s_invalid
  def is_valid = state =/= s_invalid
  // "next_*" values travel with the uop when it is compacted into another slot.
  val next_state = Wire(UInt()) // the next state of this slot (which might then get moved to a new slot)
  val next_uopc = Wire(UInt()) // the next uopc of this slot (which might then get moved to a new slot)
  val next_lrs1_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot)
  val next_lrs2_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot)
  val state = RegInit(s_invalid)
  // Operand-ready bits: p1/p2/p3 for the register operands, ppred for the
  // SFB predicate operand.
  val p1 = RegInit(false.B)
  val p2 = RegInit(false.B)
  val p3 = RegInit(false.B)
  val ppred = RegInit(false.B)
  // Poison if woken up by speculative load.
  // Poison lasts 1 cycle (as ldMiss will come on the next cycle).
  // SO if poisoned is true, set it to false!
  val p1_poisoned = RegInit(false.B)
  val p2_poisoned = RegInit(false.B)
  p1_poisoned := false.B
  p2_poisoned := false.B
  val next_p1_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p1_poisoned, p1_poisoned)
  val next_p2_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p2_poisoned, p2_poisoned)
  val slot_uop = RegInit(NullMicroOp)
  // The uop this slot will hold next cycle (incoming uop wins over current).
  val next_uop = Mux(io.in_uop.valid, io.in_uop.bits, slot_uop)
  //-----------------------------------------------------------------------------
  // next slot state computation
  // compute the next state for THIS entry slot (in a collapsing queue, the
  // current uop may get moved elsewhere, and a new uop can enter
  when (io.kill) {
    state := s_invalid
  } .elsewhen (io.in_uop.valid) {
    state := io.in_uop.bits.iw_state
  } .elsewhen (io.clear) {
    state := s_invalid
  } .otherwise {
    state := next_state
  }
  //-----------------------------------------------------------------------------
  // "update" state
  // compute the next state for the micro-op in this slot. This micro-op may
  // be moved elsewhere, so the "next_state" travels with it.
  // defaults
  next_state := state
  next_uopc := slot_uop.uopc
  next_lrs1_rtype := slot_uop.lrs1_rtype
  next_lrs2_rtype := slot_uop.lrs2_rtype
  when (io.kill) {
    next_state := s_invalid
  } .elsewhen ((io.grant && (state === s_valid_1)) ||
    (io.grant && (state === s_valid_2) && p1 && p2 && ppred)) {
    // try to issue this uop.
    // a poisoned operand under an ldspec miss squashes the grant.
    when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) {
      next_state := s_invalid
    }
  } .elsewhen (io.grant && (state === s_valid_2)) {
    // only half of the 2-uop entry issued; keep the other half as s_valid_1.
    when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) {
      next_state := s_valid_1
      when (p1) {
        // address half issued; what remains is the store-data uop.
        slot_uop.uopc := uopSTD
        next_uopc := uopSTD
        slot_uop.lrs1_rtype := RT_X
        next_lrs1_rtype := RT_X
      } .otherwise {
        slot_uop.lrs2_rtype := RT_X
        next_lrs2_rtype := RT_X
      }
    }
  }
  when (io.in_uop.valid) {
    slot_uop := io.in_uop.bits
    assert (is_invalid || io.clear || io.kill, "trying to overwrite a valid issue slot.")
  }
  // Wakeup Compare Logic
  // these signals are the "next_p*" for the current slot's micro-op.
  // they are important for shifting the current slot_uop up to an other entry.
  val next_p1 = WireInit(p1)
  val next_p2 = WireInit(p2)
  val next_p3 = WireInit(p3)
  val next_ppred = WireInit(ppred)
  when (io.in_uop.valid) {
    p1 := !(io.in_uop.bits.prs1_busy)
    p2 := !(io.in_uop.bits.prs2_busy)
    p3 := !(io.in_uop.bits.prs3_busy)
    ppred := !(io.in_uop.bits.ppred_busy)
  }
  // Un-ready operands whose speculative wakeup turned out to be a load miss.
  when (io.ldspec_miss && next_p1_poisoned) {
    assert(next_uop.prs1 =/= 0.U, "Poison bit can't be set for prs1=x0!")
    p1 := false.B
  }
  when (io.ldspec_miss && next_p2_poisoned) {
    assert(next_uop.prs2 =/= 0.U, "Poison bit can't be set for prs2=x0!")
    p2 := false.B
  }
  // Regular wakeups: a broadcast pdst matching a source register readies it.
  for (i <- 0 until numWakeupPorts) {
    when (io.wakeup_ports(i).valid &&
      (io.wakeup_ports(i).bits.pdst === next_uop.prs1)) {
      p1 := true.B
    }
    when (io.wakeup_ports(i).valid &&
      (io.wakeup_ports(i).bits.pdst === next_uop.prs2)) {
      p2 := true.B
    }
    when (io.wakeup_ports(i).valid &&
      (io.wakeup_ports(i).bits.pdst === next_uop.prs3)) {
      p3 := true.B
    }
  }
  when (io.pred_wakeup_port.valid && io.pred_wakeup_port.bits === next_uop.ppred) {
    ppred := true.B
  }
  for (w <- 0 until memWidth) {
    assert (!(io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === 0.U),
      "Loads to x0 should never speculatively wakeup other instructions")
  }
  // Speculative load wakeups: mark the operand ready but poisoned, so it can
  // be un-readied next cycle if the load misses.
  // TODO disable if FP IQ.
  for (w <- 0 until memWidth) {
    when (io.spec_ld_wakeup(w).valid &&
      io.spec_ld_wakeup(w).bits === next_uop.prs1 &&
      next_uop.lrs1_rtype === RT_FIX) {
      p1 := true.B
      p1_poisoned := true.B
      assert (!next_p1_poisoned)
    }
    when (io.spec_ld_wakeup(w).valid &&
      io.spec_ld_wakeup(w).bits === next_uop.prs2 &&
      next_uop.lrs2_rtype === RT_FIX) {
      p2 := true.B
      p2_poisoned := true.B
      assert (!next_p2_poisoned)
    }
  }
  // Handle branch misspeculations
  val next_br_mask = GetNewBrMask(io.brupdate, slot_uop)
  // was this micro-op killed by a branch? if yes, we can't let it be valid if
  // we compact it into an other entry
  when (IsKilledByBranch(io.brupdate, slot_uop)) {
    next_state := s_invalid
  }
  when (!io.in_uop.valid) {
    slot_uop.br_mask := next_br_mask
  }
  //-------------------------------------------------------------
  // Request Logic
  io.request := is_valid && p1 && p2 && p3 && ppred && !io.kill
  val high_priority = slot_uop.is_br || slot_uop.is_jal || slot_uop.is_jalr
  io.request_hp := io.request && high_priority
  when (state === s_valid_1) {
    io.request := p1 && p2 && p3 && ppred && !io.kill
  } .elsewhen (state === s_valid_2) {
    // a 2-uop entry can request as soon as either half is ready.
    io.request := (p1 || p2) && ppred && !io.kill
  } .otherwise {
    io.request := false.B
  }
  //assign outputs
  io.valid := is_valid
  io.uop := slot_uop
  io.uop.iw_p1_poisoned := p1_poisoned
  io.uop.iw_p2_poisoned := p2_poisoned
  // micro-op will vacate due to grant.
  val may_vacate = io.grant && ((state === s_valid_1) || (state === s_valid_2) && p1 && p2 && ppred)
  val squash_grant = io.ldspec_miss && (p1_poisoned || p2_poisoned)
  io.will_be_valid := is_valid && !(may_vacate && !squash_grant)
  // out_uop carries the updated fields for compaction into another slot.
  io.out_uop := slot_uop
  io.out_uop.iw_state := next_state
  io.out_uop.uopc := next_uopc
  io.out_uop.lrs1_rtype := next_lrs1_rtype
  io.out_uop.lrs2_rtype := next_lrs2_rtype
  io.out_uop.br_mask := next_br_mask
  io.out_uop.prs1_busy := !p1
  io.out_uop.prs2_busy := !p2
  io.out_uop.prs3_busy := !p3
  io.out_uop.ppred_busy := !ppred
  io.out_uop.iw_p1_poisoned := p1_poisoned
  io.out_uop.iw_p2_poisoned := p2_poisoned
  // When only half of a 2-uop entry is ready, issue just that half.
  when (state === s_valid_2) {
    when (p1 && p2 && ppred) {
      ; // send out the entire instruction as one uop
    } .elsewhen (p1 && ppred) {
      io.uop.uopc := slot_uop.uopc
      io.uop.lrs2_rtype := RT_X
    } .elsewhen (p2 && ppred) {
      io.uop.uopc := uopSTD
      io.uop.lrs1_rtype := RT_X
    }
  }
  // debug outputs
  io.debug.p1 := p1
  io.debug.p2 := p2
  io.debug.p3 := p3
  io.debug.ppred := ppred
  io.debug.state := state
} | module IssueSlot(
input clock,
input reset,
output io_valid,
output io_will_be_valid,
output io_request,
input io_grant,
input [7:0] io_brupdate_b1_resolve_mask,
input [7:0] io_brupdate_b1_mispredict_mask,
input io_kill,
input io_clear,
input io_wakeup_ports_0_valid,
input [5:0] io_wakeup_ports_0_bits_pdst,
input io_wakeup_ports_1_valid,
input [5:0] io_wakeup_ports_1_bits_pdst,
input io_in_uop_valid,
input [6:0] io_in_uop_bits_uopc,
input [31:0] io_in_uop_bits_inst,
input [31:0] io_in_uop_bits_debug_inst,
input io_in_uop_bits_is_rvc,
input [39:0] io_in_uop_bits_debug_pc,
input [2:0] io_in_uop_bits_iq_type,
input [9:0] io_in_uop_bits_fu_code,
input [1:0] io_in_uop_bits_iw_state,
input io_in_uop_bits_is_br,
input io_in_uop_bits_is_jalr,
input io_in_uop_bits_is_jal,
input io_in_uop_bits_is_sfb,
input [7:0] io_in_uop_bits_br_mask,
input [2:0] io_in_uop_bits_br_tag,
input [3:0] io_in_uop_bits_ftq_idx,
input io_in_uop_bits_edge_inst,
input [5:0] io_in_uop_bits_pc_lob,
input io_in_uop_bits_taken,
input [19:0] io_in_uop_bits_imm_packed,
input [11:0] io_in_uop_bits_csr_addr,
input [4:0] io_in_uop_bits_rob_idx,
input [2:0] io_in_uop_bits_ldq_idx,
input [2:0] io_in_uop_bits_stq_idx,
input [1:0] io_in_uop_bits_rxq_idx,
input [5:0] io_in_uop_bits_pdst,
input [5:0] io_in_uop_bits_prs1,
input [5:0] io_in_uop_bits_prs2,
input [5:0] io_in_uop_bits_prs3,
input [3:0] io_in_uop_bits_ppred,
input io_in_uop_bits_prs1_busy,
input io_in_uop_bits_prs2_busy,
input io_in_uop_bits_prs3_busy,
input io_in_uop_bits_ppred_busy,
input [5:0] io_in_uop_bits_stale_pdst,
input io_in_uop_bits_exception,
input [63:0] io_in_uop_bits_exc_cause,
input io_in_uop_bits_bypassable,
input [4:0] io_in_uop_bits_mem_cmd,
input [1:0] io_in_uop_bits_mem_size,
input io_in_uop_bits_mem_signed,
input io_in_uop_bits_is_fence,
input io_in_uop_bits_is_fencei,
input io_in_uop_bits_is_amo,
input io_in_uop_bits_uses_ldq,
input io_in_uop_bits_uses_stq,
input io_in_uop_bits_is_sys_pc2epc,
input io_in_uop_bits_is_unique,
input io_in_uop_bits_flush_on_commit,
input io_in_uop_bits_ldst_is_rs1,
input [5:0] io_in_uop_bits_ldst,
input [5:0] io_in_uop_bits_lrs1,
input [5:0] io_in_uop_bits_lrs2,
input [5:0] io_in_uop_bits_lrs3,
input io_in_uop_bits_ldst_val,
input [1:0] io_in_uop_bits_dst_rtype,
input [1:0] io_in_uop_bits_lrs1_rtype,
input [1:0] io_in_uop_bits_lrs2_rtype,
input io_in_uop_bits_frs3_en,
input io_in_uop_bits_fp_val,
input io_in_uop_bits_fp_single,
input io_in_uop_bits_xcpt_pf_if,
input io_in_uop_bits_xcpt_ae_if,
input io_in_uop_bits_xcpt_ma_if,
input io_in_uop_bits_bp_debug_if,
input io_in_uop_bits_bp_xcpt_if,
input [1:0] io_in_uop_bits_debug_fsrc,
input [1:0] io_in_uop_bits_debug_tsrc,
output [6:0] io_out_uop_uopc,
output [31:0] io_out_uop_inst,
output [31:0] io_out_uop_debug_inst,
output io_out_uop_is_rvc,
output [39:0] io_out_uop_debug_pc,
output [2:0] io_out_uop_iq_type,
output [9:0] io_out_uop_fu_code,
output [1:0] io_out_uop_iw_state,
output io_out_uop_is_br,
output io_out_uop_is_jalr,
output io_out_uop_is_jal,
output io_out_uop_is_sfb,
output [7:0] io_out_uop_br_mask,
output [2:0] io_out_uop_br_tag,
output [3:0] io_out_uop_ftq_idx,
output io_out_uop_edge_inst,
output [5:0] io_out_uop_pc_lob,
output io_out_uop_taken,
output [19:0] io_out_uop_imm_packed,
output [11:0] io_out_uop_csr_addr,
output [4:0] io_out_uop_rob_idx,
output [2:0] io_out_uop_ldq_idx,
output [2:0] io_out_uop_stq_idx,
output [1:0] io_out_uop_rxq_idx,
output [5:0] io_out_uop_pdst,
output [5:0] io_out_uop_prs1,
output [5:0] io_out_uop_prs2,
output [5:0] io_out_uop_prs3,
output [3:0] io_out_uop_ppred,
output io_out_uop_prs1_busy,
output io_out_uop_prs2_busy,
output io_out_uop_prs3_busy,
output io_out_uop_ppred_busy,
output [5:0] io_out_uop_stale_pdst,
output io_out_uop_exception,
output [63:0] io_out_uop_exc_cause,
output io_out_uop_bypassable,
output [4:0] io_out_uop_mem_cmd,
output [1:0] io_out_uop_mem_size,
output io_out_uop_mem_signed,
output io_out_uop_is_fence,
output io_out_uop_is_fencei,
output io_out_uop_is_amo,
output io_out_uop_uses_ldq,
output io_out_uop_uses_stq,
output io_out_uop_is_sys_pc2epc,
output io_out_uop_is_unique,
output io_out_uop_flush_on_commit,
output io_out_uop_ldst_is_rs1,
output [5:0] io_out_uop_ldst,
output [5:0] io_out_uop_lrs1,
output [5:0] io_out_uop_lrs2,
output [5:0] io_out_uop_lrs3,
output io_out_uop_ldst_val,
output [1:0] io_out_uop_dst_rtype,
output [1:0] io_out_uop_lrs1_rtype,
output [1:0] io_out_uop_lrs2_rtype,
output io_out_uop_frs3_en,
output io_out_uop_fp_val,
output io_out_uop_fp_single,
output io_out_uop_xcpt_pf_if,
output io_out_uop_xcpt_ae_if,
output io_out_uop_xcpt_ma_if,
output io_out_uop_bp_debug_if,
output io_out_uop_bp_xcpt_if,
output [1:0] io_out_uop_debug_fsrc,
output [1:0] io_out_uop_debug_tsrc,
output [6:0] io_uop_uopc,
output [31:0] io_uop_inst,
output [31:0] io_uop_debug_inst,
output io_uop_is_rvc,
output [39:0] io_uop_debug_pc,
output [2:0] io_uop_iq_type,
output [9:0] io_uop_fu_code,
output [1:0] io_uop_iw_state,
output io_uop_is_br,
output io_uop_is_jalr,
output io_uop_is_jal,
output io_uop_is_sfb,
output [7:0] io_uop_br_mask,
output [2:0] io_uop_br_tag,
output [3:0] io_uop_ftq_idx,
output io_uop_edge_inst,
output [5:0] io_uop_pc_lob,
output io_uop_taken,
output [19:0] io_uop_imm_packed,
output [11:0] io_uop_csr_addr,
output [4:0] io_uop_rob_idx,
output [2:0] io_uop_ldq_idx,
output [2:0] io_uop_stq_idx,
output [1:0] io_uop_rxq_idx,
output [5:0] io_uop_pdst,
output [5:0] io_uop_prs1,
output [5:0] io_uop_prs2,
output [5:0] io_uop_prs3,
output [3:0] io_uop_ppred,
output io_uop_prs1_busy,
output io_uop_prs2_busy,
output io_uop_prs3_busy,
output io_uop_ppred_busy,
output [5:0] io_uop_stale_pdst,
output io_uop_exception,
output [63:0] io_uop_exc_cause,
output io_uop_bypassable,
output [4:0] io_uop_mem_cmd,
output [1:0] io_uop_mem_size,
output io_uop_mem_signed,
output io_uop_is_fence,
output io_uop_is_fencei,
output io_uop_is_amo,
output io_uop_uses_ldq,
output io_uop_uses_stq,
output io_uop_is_sys_pc2epc,
output io_uop_is_unique,
output io_uop_flush_on_commit,
output io_uop_ldst_is_rs1,
output [5:0] io_uop_ldst,
output [5:0] io_uop_lrs1,
output [5:0] io_uop_lrs2,
output [5:0] io_uop_lrs3,
output io_uop_ldst_val,
output [1:0] io_uop_dst_rtype,
output [1:0] io_uop_lrs1_rtype,
output [1:0] io_uop_lrs2_rtype,
output io_uop_frs3_en,
output io_uop_fp_val,
output io_uop_fp_single,
output io_uop_xcpt_pf_if,
output io_uop_xcpt_ae_if,
output io_uop_xcpt_ma_if,
output io_uop_bp_debug_if,
output io_uop_bp_xcpt_if,
output [1:0] io_uop_debug_fsrc,
output [1:0] io_uop_debug_tsrc
);
reg [1:0] state;
reg p1;
reg p2;
reg p3;
reg ppred;
reg [6:0] slot_uop_uopc;
reg [31:0] slot_uop_inst;
reg [31:0] slot_uop_debug_inst;
reg slot_uop_is_rvc;
reg [39:0] slot_uop_debug_pc;
reg [2:0] slot_uop_iq_type;
reg [9:0] slot_uop_fu_code;
reg [1:0] slot_uop_iw_state;
reg slot_uop_is_br;
reg slot_uop_is_jalr;
reg slot_uop_is_jal;
reg slot_uop_is_sfb;
reg [7:0] slot_uop_br_mask;
reg [2:0] slot_uop_br_tag;
reg [3:0] slot_uop_ftq_idx;
reg slot_uop_edge_inst;
reg [5:0] slot_uop_pc_lob;
reg slot_uop_taken;
reg [19:0] slot_uop_imm_packed;
reg [11:0] slot_uop_csr_addr;
reg [4:0] slot_uop_rob_idx;
reg [2:0] slot_uop_ldq_idx;
reg [2:0] slot_uop_stq_idx;
reg [1:0] slot_uop_rxq_idx;
reg [5:0] slot_uop_pdst;
reg [5:0] slot_uop_prs1;
reg [5:0] slot_uop_prs2;
reg [5:0] slot_uop_prs3;
reg [3:0] slot_uop_ppred;
reg slot_uop_prs1_busy;
reg slot_uop_prs2_busy;
reg slot_uop_prs3_busy;
reg slot_uop_ppred_busy;
reg [5:0] slot_uop_stale_pdst;
reg slot_uop_exception;
reg [63:0] slot_uop_exc_cause;
reg slot_uop_bypassable;
reg [4:0] slot_uop_mem_cmd;
reg [1:0] slot_uop_mem_size;
reg slot_uop_mem_signed;
reg slot_uop_is_fence;
reg slot_uop_is_fencei;
reg slot_uop_is_amo;
reg slot_uop_uses_ldq;
reg slot_uop_uses_stq;
reg slot_uop_is_sys_pc2epc;
reg slot_uop_is_unique;
reg slot_uop_flush_on_commit;
reg slot_uop_ldst_is_rs1;
reg [5:0] slot_uop_ldst;
reg [5:0] slot_uop_lrs1;
reg [5:0] slot_uop_lrs2;
reg [5:0] slot_uop_lrs3;
reg slot_uop_ldst_val;
reg [1:0] slot_uop_dst_rtype;
reg [1:0] slot_uop_lrs1_rtype;
reg [1:0] slot_uop_lrs2_rtype;
reg slot_uop_frs3_en;
reg slot_uop_fp_val;
reg slot_uop_fp_single;
reg slot_uop_xcpt_pf_if;
reg slot_uop_xcpt_ae_if;
reg slot_uop_xcpt_ma_if;
reg slot_uop_bp_debug_if;
reg slot_uop_bp_xcpt_if;
reg [1:0] slot_uop_debug_fsrc;
reg [1:0] slot_uop_debug_tsrc;
wire _GEN = state == 2'h2;
wire _GEN_0 = io_grant & _GEN;
wire _GEN_1 = _GEN_0 & p1;
wire _GEN_2 = io_grant & state == 2'h1 | _GEN_1 & p2 & ppred;
wire _GEN_3 = io_kill | _GEN_2;
wire _GEN_4 = _GEN_3 | ~_GEN_1;
wire _GEN_5 = _GEN_3 | ~_GEN_0 | p1;
wire [7:0] next_br_mask = slot_uop_br_mask & ~io_brupdate_b1_resolve_mask;
wire _GEN_6 = (|(io_brupdate_b1_mispredict_mask & slot_uop_br_mask)) | io_kill | _GEN_2;
wire _may_vacate_T = state == 2'h1;
wire _may_vacate_T_1 = state == 2'h2;
wire _GEN_7 = p1 & p2 & ppred;
wire _GEN_8 = p1 & ppred;
wire _GEN_9 = ~_may_vacate_T_1 | _GEN_7 | _GEN_8 | ~(p2 & ppred);
wire [5:0] next_uop_prs1 = io_in_uop_valid ? io_in_uop_bits_prs1 : slot_uop_prs1;
wire [5:0] next_uop_prs2 = io_in_uop_valid ? io_in_uop_bits_prs2 : slot_uop_prs2;
wire [5:0] next_uop_prs3 = io_in_uop_valid ? io_in_uop_bits_prs3 : slot_uop_prs3;
always @(posedge clock) begin
if (reset) begin
state <= 2'h0;
p1 <= 1'h0;
p2 <= 1'h0;
p3 <= 1'h0;
ppred <= 1'h0;
slot_uop_uopc <= 7'h0;
slot_uop_pdst <= 6'h0;
slot_uop_bypassable <= 1'h0;
slot_uop_uses_ldq <= 1'h0;
slot_uop_uses_stq <= 1'h0;
slot_uop_dst_rtype <= 2'h2;
slot_uop_fp_val <= 1'h0;
end
else begin
if (io_kill)
state <= 2'h0;
else if (io_in_uop_valid)
state <= io_in_uop_bits_iw_state;
else if (io_clear | _GEN_6)
state <= 2'h0;
else if (_GEN_0)
state <= 2'h1;
p1 <= io_wakeup_ports_1_valid & io_wakeup_ports_1_bits_pdst == next_uop_prs1 | io_wakeup_ports_0_valid & io_wakeup_ports_0_bits_pdst == next_uop_prs1 | (io_in_uop_valid ? ~io_in_uop_bits_prs1_busy : p1);
p2 <= io_wakeup_ports_1_valid & io_wakeup_ports_1_bits_pdst == next_uop_prs2 | io_wakeup_ports_0_valid & io_wakeup_ports_0_bits_pdst == next_uop_prs2 | (io_in_uop_valid ? ~io_in_uop_bits_prs2_busy : p2);
p3 <= io_wakeup_ports_1_valid & io_wakeup_ports_1_bits_pdst == next_uop_prs3 | io_wakeup_ports_0_valid & io_wakeup_ports_0_bits_pdst == next_uop_prs3 | (io_in_uop_valid ? ~io_in_uop_bits_prs3_busy : p3);
if (io_in_uop_valid) begin
ppred <= ~io_in_uop_bits_ppred_busy;
slot_uop_uopc <= io_in_uop_bits_uopc;
slot_uop_pdst <= io_in_uop_bits_pdst;
slot_uop_bypassable <= io_in_uop_bits_bypassable;
slot_uop_uses_ldq <= io_in_uop_bits_uses_ldq;
slot_uop_uses_stq <= io_in_uop_bits_uses_stq;
slot_uop_dst_rtype <= io_in_uop_bits_dst_rtype;
slot_uop_fp_val <= io_in_uop_bits_fp_val;
end
else if (_GEN_4) begin
end
else
slot_uop_uopc <= 7'h3;
end
if (io_in_uop_valid) begin
slot_uop_inst <= io_in_uop_bits_inst;
slot_uop_debug_inst <= io_in_uop_bits_debug_inst;
slot_uop_is_rvc <= io_in_uop_bits_is_rvc;
slot_uop_debug_pc <= io_in_uop_bits_debug_pc;
slot_uop_iq_type <= io_in_uop_bits_iq_type;
slot_uop_fu_code <= io_in_uop_bits_fu_code;
slot_uop_iw_state <= io_in_uop_bits_iw_state;
slot_uop_is_br <= io_in_uop_bits_is_br;
slot_uop_is_jalr <= io_in_uop_bits_is_jalr;
slot_uop_is_jal <= io_in_uop_bits_is_jal;
slot_uop_is_sfb <= io_in_uop_bits_is_sfb;
slot_uop_br_tag <= io_in_uop_bits_br_tag;
slot_uop_ftq_idx <= io_in_uop_bits_ftq_idx;
slot_uop_edge_inst <= io_in_uop_bits_edge_inst;
slot_uop_pc_lob <= io_in_uop_bits_pc_lob;
slot_uop_taken <= io_in_uop_bits_taken;
slot_uop_imm_packed <= io_in_uop_bits_imm_packed;
slot_uop_csr_addr <= io_in_uop_bits_csr_addr;
slot_uop_rob_idx <= io_in_uop_bits_rob_idx;
slot_uop_ldq_idx <= io_in_uop_bits_ldq_idx;
slot_uop_stq_idx <= io_in_uop_bits_stq_idx;
slot_uop_rxq_idx <= io_in_uop_bits_rxq_idx;
slot_uop_prs1 <= io_in_uop_bits_prs1;
slot_uop_prs2 <= io_in_uop_bits_prs2;
slot_uop_prs3 <= io_in_uop_bits_prs3;
slot_uop_ppred <= io_in_uop_bits_ppred;
slot_uop_prs1_busy <= io_in_uop_bits_prs1_busy;
slot_uop_prs2_busy <= io_in_uop_bits_prs2_busy;
slot_uop_prs3_busy <= io_in_uop_bits_prs3_busy;
slot_uop_ppred_busy <= io_in_uop_bits_ppred_busy;
slot_uop_stale_pdst <= io_in_uop_bits_stale_pdst;
slot_uop_exception <= io_in_uop_bits_exception;
slot_uop_exc_cause <= io_in_uop_bits_exc_cause;
slot_uop_mem_cmd <= io_in_uop_bits_mem_cmd;
slot_uop_mem_size <= io_in_uop_bits_mem_size;
slot_uop_mem_signed <= io_in_uop_bits_mem_signed;
slot_uop_is_fence <= io_in_uop_bits_is_fence;
slot_uop_is_fencei <= io_in_uop_bits_is_fencei;
slot_uop_is_amo <= io_in_uop_bits_is_amo;
slot_uop_is_sys_pc2epc <= io_in_uop_bits_is_sys_pc2epc;
slot_uop_is_unique <= io_in_uop_bits_is_unique;
slot_uop_flush_on_commit <= io_in_uop_bits_flush_on_commit;
slot_uop_ldst_is_rs1 <= io_in_uop_bits_ldst_is_rs1;
slot_uop_ldst <= io_in_uop_bits_ldst;
slot_uop_lrs1 <= io_in_uop_bits_lrs1;
slot_uop_lrs2 <= io_in_uop_bits_lrs2;
slot_uop_lrs3 <= io_in_uop_bits_lrs3;
slot_uop_ldst_val <= io_in_uop_bits_ldst_val;
slot_uop_lrs1_rtype <= io_in_uop_bits_lrs1_rtype;
slot_uop_lrs2_rtype <= io_in_uop_bits_lrs2_rtype;
slot_uop_frs3_en <= io_in_uop_bits_frs3_en;
slot_uop_fp_single <= io_in_uop_bits_fp_single;
slot_uop_xcpt_pf_if <= io_in_uop_bits_xcpt_pf_if;
slot_uop_xcpt_ae_if <= io_in_uop_bits_xcpt_ae_if;
slot_uop_xcpt_ma_if <= io_in_uop_bits_xcpt_ma_if;
slot_uop_bp_debug_if <= io_in_uop_bits_bp_debug_if;
slot_uop_bp_xcpt_if <= io_in_uop_bits_bp_xcpt_if;
slot_uop_debug_fsrc <= io_in_uop_bits_debug_fsrc;
slot_uop_debug_tsrc <= io_in_uop_bits_debug_tsrc;
end
else begin
if (_GEN_4) begin
end
else
slot_uop_lrs1_rtype <= 2'h2;
if (_GEN_5) begin
end
else
slot_uop_lrs2_rtype <= 2'h2;
end
slot_uop_br_mask <= io_in_uop_valid ? io_in_uop_bits_br_mask : next_br_mask;
end
assign io_valid = |state;
assign io_will_be_valid = (|state) & ~(io_grant & (_may_vacate_T | _may_vacate_T_1 & p1 & p2 & ppred));
assign io_request = _may_vacate_T ? p1 & p2 & p3 & ppred & ~io_kill : _GEN & (p1 | p2) & ppred & ~io_kill;
assign io_out_uop_uopc = _GEN_4 ? slot_uop_uopc : 7'h3;
assign io_out_uop_inst = slot_uop_inst;
assign io_out_uop_debug_inst = slot_uop_debug_inst;
assign io_out_uop_is_rvc = slot_uop_is_rvc;
assign io_out_uop_debug_pc = slot_uop_debug_pc;
assign io_out_uop_iq_type = slot_uop_iq_type;
assign io_out_uop_fu_code = slot_uop_fu_code;
assign io_out_uop_iw_state = _GEN_6 ? 2'h0 : _GEN_0 ? 2'h1 : state;
assign io_out_uop_is_br = slot_uop_is_br;
assign io_out_uop_is_jalr = slot_uop_is_jalr;
assign io_out_uop_is_jal = slot_uop_is_jal;
assign io_out_uop_is_sfb = slot_uop_is_sfb;
assign io_out_uop_br_mask = next_br_mask;
assign io_out_uop_br_tag = slot_uop_br_tag;
assign io_out_uop_ftq_idx = slot_uop_ftq_idx;
assign io_out_uop_edge_inst = slot_uop_edge_inst;
assign io_out_uop_pc_lob = slot_uop_pc_lob;
assign io_out_uop_taken = slot_uop_taken;
assign io_out_uop_imm_packed = slot_uop_imm_packed;
assign io_out_uop_csr_addr = slot_uop_csr_addr;
assign io_out_uop_rob_idx = slot_uop_rob_idx;
assign io_out_uop_ldq_idx = slot_uop_ldq_idx;
assign io_out_uop_stq_idx = slot_uop_stq_idx;
assign io_out_uop_rxq_idx = slot_uop_rxq_idx;
assign io_out_uop_pdst = slot_uop_pdst;
assign io_out_uop_prs1 = slot_uop_prs1;
assign io_out_uop_prs2 = slot_uop_prs2;
assign io_out_uop_prs3 = slot_uop_prs3;
assign io_out_uop_ppred = slot_uop_ppred;
assign io_out_uop_prs1_busy = ~p1;
assign io_out_uop_prs2_busy = ~p2;
assign io_out_uop_prs3_busy = ~p3;
assign io_out_uop_ppred_busy = ~ppred;
assign io_out_uop_stale_pdst = slot_uop_stale_pdst;
assign io_out_uop_exception = slot_uop_exception;
assign io_out_uop_exc_cause = slot_uop_exc_cause;
assign io_out_uop_bypassable = slot_uop_bypassable;
assign io_out_uop_mem_cmd = slot_uop_mem_cmd;
assign io_out_uop_mem_size = slot_uop_mem_size;
assign io_out_uop_mem_signed = slot_uop_mem_signed;
assign io_out_uop_is_fence = slot_uop_is_fence;
assign io_out_uop_is_fencei = slot_uop_is_fencei;
assign io_out_uop_is_amo = slot_uop_is_amo;
assign io_out_uop_uses_ldq = slot_uop_uses_ldq;
assign io_out_uop_uses_stq = slot_uop_uses_stq;
assign io_out_uop_is_sys_pc2epc = slot_uop_is_sys_pc2epc;
assign io_out_uop_is_unique = slot_uop_is_unique;
assign io_out_uop_flush_on_commit = slot_uop_flush_on_commit;
assign io_out_uop_ldst_is_rs1 = slot_uop_ldst_is_rs1;
assign io_out_uop_ldst = slot_uop_ldst;
assign io_out_uop_lrs1 = slot_uop_lrs1;
assign io_out_uop_lrs2 = slot_uop_lrs2;
assign io_out_uop_lrs3 = slot_uop_lrs3;
assign io_out_uop_ldst_val = slot_uop_ldst_val;
assign io_out_uop_dst_rtype = slot_uop_dst_rtype;
assign io_out_uop_lrs1_rtype = _GEN_4 ? slot_uop_lrs1_rtype : 2'h2;
assign io_out_uop_lrs2_rtype = _GEN_5 ? slot_uop_lrs2_rtype : 2'h2;
assign io_out_uop_frs3_en = slot_uop_frs3_en;
assign io_out_uop_fp_val = slot_uop_fp_val;
assign io_out_uop_fp_single = slot_uop_fp_single;
assign io_out_uop_xcpt_pf_if = slot_uop_xcpt_pf_if;
assign io_out_uop_xcpt_ae_if = slot_uop_xcpt_ae_if;
assign io_out_uop_xcpt_ma_if = slot_uop_xcpt_ma_if;
assign io_out_uop_bp_debug_if = slot_uop_bp_debug_if;
assign io_out_uop_bp_xcpt_if = slot_uop_bp_xcpt_if;
assign io_out_uop_debug_fsrc = slot_uop_debug_fsrc;
assign io_out_uop_debug_tsrc = slot_uop_debug_tsrc;
assign io_uop_uopc = _GEN_9 ? slot_uop_uopc : 7'h3;
assign io_uop_inst = slot_uop_inst;
assign io_uop_debug_inst = slot_uop_debug_inst;
assign io_uop_is_rvc = slot_uop_is_rvc;
assign io_uop_debug_pc = slot_uop_debug_pc;
assign io_uop_iq_type = slot_uop_iq_type;
assign io_uop_fu_code = slot_uop_fu_code;
assign io_uop_iw_state = slot_uop_iw_state;
assign io_uop_is_br = slot_uop_is_br;
assign io_uop_is_jalr = slot_uop_is_jalr;
assign io_uop_is_jal = slot_uop_is_jal;
assign io_uop_is_sfb = slot_uop_is_sfb;
assign io_uop_br_mask = slot_uop_br_mask;
assign io_uop_br_tag = slot_uop_br_tag;
assign io_uop_ftq_idx = slot_uop_ftq_idx;
assign io_uop_edge_inst = slot_uop_edge_inst;
assign io_uop_pc_lob = slot_uop_pc_lob;
assign io_uop_taken = slot_uop_taken;
assign io_uop_imm_packed = slot_uop_imm_packed;
assign io_uop_csr_addr = slot_uop_csr_addr;
assign io_uop_rob_idx = slot_uop_rob_idx;
assign io_uop_ldq_idx = slot_uop_ldq_idx;
assign io_uop_stq_idx = slot_uop_stq_idx;
assign io_uop_rxq_idx = slot_uop_rxq_idx;
assign io_uop_pdst = slot_uop_pdst;
assign io_uop_prs1 = slot_uop_prs1;
assign io_uop_prs2 = slot_uop_prs2;
assign io_uop_prs3 = slot_uop_prs3;
assign io_uop_ppred = slot_uop_ppred;
assign io_uop_prs1_busy = slot_uop_prs1_busy;
assign io_uop_prs2_busy = slot_uop_prs2_busy;
assign io_uop_prs3_busy = slot_uop_prs3_busy;
assign io_uop_ppred_busy = slot_uop_ppred_busy;
assign io_uop_stale_pdst = slot_uop_stale_pdst;
assign io_uop_exception = slot_uop_exception;
assign io_uop_exc_cause = slot_uop_exc_cause;
assign io_uop_bypassable = slot_uop_bypassable;
assign io_uop_mem_cmd = slot_uop_mem_cmd;
assign io_uop_mem_size = slot_uop_mem_size;
assign io_uop_mem_signed = slot_uop_mem_signed;
assign io_uop_is_fence = slot_uop_is_fence;
assign io_uop_is_fencei = slot_uop_is_fencei;
assign io_uop_is_amo = slot_uop_is_amo;
assign io_uop_uses_ldq = slot_uop_uses_ldq;
assign io_uop_uses_stq = slot_uop_uses_stq;
assign io_uop_is_sys_pc2epc = slot_uop_is_sys_pc2epc;
assign io_uop_is_unique = slot_uop_is_unique;
assign io_uop_flush_on_commit = slot_uop_flush_on_commit;
assign io_uop_ldst_is_rs1 = slot_uop_ldst_is_rs1;
assign io_uop_ldst = slot_uop_ldst;
assign io_uop_lrs1 = slot_uop_lrs1;
assign io_uop_lrs2 = slot_uop_lrs2;
assign io_uop_lrs3 = slot_uop_lrs3;
assign io_uop_ldst_val = slot_uop_ldst_val;
assign io_uop_dst_rtype = slot_uop_dst_rtype;
assign io_uop_lrs1_rtype = _GEN_9 ? slot_uop_lrs1_rtype : 2'h2;
assign io_uop_lrs2_rtype = ~_may_vacate_T_1 | _GEN_7 | ~_GEN_8 ? slot_uop_lrs2_rtype : 2'h2;
assign io_uop_frs3_en = slot_uop_frs3_en;
assign io_uop_fp_val = slot_uop_fp_val;
assign io_uop_fp_single = slot_uop_fp_single;
assign io_uop_xcpt_pf_if = slot_uop_xcpt_pf_if;
assign io_uop_xcpt_ae_if = slot_uop_xcpt_ae_if;
assign io_uop_xcpt_ma_if = slot_uop_xcpt_ma_if;
assign io_uop_bp_debug_if = slot_uop_bp_debug_if;
assign io_uop_bp_xcpt_if = slot_uop_bp_xcpt_if;
assign io_uop_debug_fsrc = slot_uop_debug_fsrc;
assign io_uop_debug_tsrc = slot_uop_debug_tsrc;
endmodule |
Generate the Verilog code corresponding to this Chisel code package testchipip.serdes
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config._
/** Serializes a value of arbitrary Chisel type `t` into a multi-beat stream of
 *  `flitWidth`-bit flits (beat 0 carries the least-significant slice).
 *  Beat 0 is forwarded combinationally from `io.in`; the remaining beats are
 *  replayed from the `data` register captured when beat 0 fires.
 */
class GenericSerializer[T <: Data](t: T, flitWidth: Int) extends Module {
  override def desiredName = s"GenericSerializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(t))
    val out = Decoupled(new Flit(flitWidth))
    val busy = Output(Bool())
  })
  // Number of flit beats needed to carry one value of `t`, padded up to flitWidth.
  val dataBits = t.getWidth.max(flitWidth)
  val dataBeats = (dataBits - 1) / flitWidth + 1
  require(dataBeats >= 1)
  val data = Reg(Vec(dataBeats, UInt(flitWidth.W)))
  val beat = RegInit(0.U(log2Ceil(dataBeats).W))
  // A new input word is only consumed while idle (beat 0).
  io.in.ready := io.out.ready && beat === 0.U
  io.out.valid := io.in.valid || beat =/= 0.U
  // Beat 0 bypasses the register; later beats come from the captured copy.
  io.out.bits.flit := Mux(beat === 0.U, io.in.bits.asUInt, data(beat))
  when (io.out.fire) {
    beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
    when (beat === 0.U) {
      // Capture the full word; entry 0 is never read back (beat 0 bypasses),
      // so deliberately leave it unconnected for dead-code elimination.
      data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(flitWidth.W)))
      data(0) := DontCare // unused, DCE this
    }
  }
  io.busy := io.out.valid
}
/** Reassembles a value of type `t` from a multi-beat stream of `flitWidth`-bit
 *  flits — the inverse of [[GenericSerializer]]. The first dataBeats-1 flits
 *  are buffered in `data`; the output is presented combinationally while the
 *  final flit is on the input.
 */
class GenericDeserializer[T <: Data](t: T, flitWidth: Int) extends Module {
  override def desiredName = s"GenericDeserializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(new Flit(flitWidth)))
    val out = Decoupled(t)
    val busy = Output(Bool())
  })
  val dataBits = t.getWidth.max(flitWidth)
  val dataBeats = (dataBits - 1) / flitWidth + 1
  require(dataBeats >= 1)
  // Only dataBeats-1 buffer entries: the last flit is forwarded combinationally.
  val data = Reg(Vec(dataBeats-1, UInt(flitWidth.W)))
  val beat = RegInit(0.U(log2Ceil(dataBeats).W))
  // Intermediate beats are always accepted; the final beat also needs io.out.ready.
  io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
  io.out.valid := io.in.valid && beat === (dataBeats-1).U
  // The last flit supplies the most-significant bits above the buffered beats.
  io.out.bits := (if (dataBeats == 1) {
    io.in.bits.flit.asTypeOf(t)
  } else {
    Cat(io.in.bits.flit, data.asUInt).asTypeOf(t)
  })
  when (io.in.fire) {
    beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
    if (dataBeats > 1) {
      when (beat =/= (dataBeats-1).U) {
        data(beat(log2Ceil(dataBeats-1)-1,0)) := io.in.bits.flit
      }
    }
  }
  // Busy whenever a partially-assembled word is in flight.
  io.busy := beat =/= 0.U
}
/** Narrows a flit stream into a stream of `phitWidth`-bit phits
 *  (requires flitWidth >= phitWidth). Phit 0 is forwarded combinationally;
 *  the remaining phits are replayed from the `data` register.
 */
class FlitToPhit(flitWidth: Int, phitWidth: Int) extends Module {
  override def desiredName = s"FlitToPhit_f${flitWidth}_p${phitWidth}"
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(new Flit(flitWidth)))
    val out = Decoupled(new Phit(phitWidth))
  })
  require(flitWidth >= phitWidth)
  val dataBeats = (flitWidth - 1) / phitWidth + 1
  // Beat 0 bypasses the buffer, so only dataBeats-1 entries are stored.
  val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
  val beat = RegInit(0.U(log2Ceil(dataBeats).W))
  // Consume a flit only when idle (beat 0) and downstream is ready.
  io.in.ready := io.out.ready && beat === 0.U
  io.out.valid := io.in.valid || beat =/= 0.U
  io.out.bits.phit := (if (dataBeats == 1) io.in.bits.flit else Mux(beat === 0.U, io.in.bits.flit, data(beat-1.U)))
  when (io.out.fire) {
    beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
    when (beat === 0.U) {
      // Store every phit slice of the incoming flit except the first (already sent).
      data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(phitWidth.W))).tail
    }
  }
}
/** Convenience constructor: converts a decoupled flit stream into a phit
 *  stream by instantiating a [[FlitToPhit]] and wiring it through.
 */
object FlitToPhit {
  def apply(flit: DecoupledIO[Flit], phitWidth: Int): DecoupledIO[Phit] = {
    val converter = Module(new FlitToPhit(flit.bits.flitWidth, phitWidth))
    converter.io.in <> flit
    converter.io.out
  }
}
/** Widens a phit stream back into flits — the inverse of [[FlitToPhit]].
 *  The first dataBeats-1 phits are buffered; the flit is presented
 *  combinationally while the final phit is on the input.
 */
class PhitToFlit(flitWidth: Int, phitWidth: Int) extends Module {
  override def desiredName = s"PhitToFlit_p${phitWidth}_f${flitWidth}"
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(new Phit(phitWidth)))
    val out = Decoupled(new Flit(flitWidth))
  })
  require(flitWidth >= phitWidth)
  val dataBeats = (flitWidth - 1) / phitWidth + 1
  val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
  val beat = RegInit(0.U(log2Ceil(dataBeats).W))
  // Intermediate phits are always accepted; the final one also needs io.out.ready.
  io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
  io.out.valid := io.in.valid && beat === (dataBeats-1).U
  // The last phit supplies the most-significant bits above the buffered ones.
  io.out.bits.flit := (if (dataBeats == 1) io.in.bits.phit else Cat(io.in.bits.phit, data.asUInt))
  when (io.in.fire) {
    beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
    if (dataBeats > 1) {
      when (beat =/= (dataBeats-1).U) {
        data(beat) := io.in.bits.phit
      }
    }
  }
}
/** Convenience constructors for phit-to-flit conversion. */
object PhitToFlit {
  /** Decoupled variant: back-pressure propagates through the converter. */
  def apply(phit: DecoupledIO[Phit], flitWidth: Int): DecoupledIO[Flit] = {
    val converter = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
    converter.io.in <> phit
    converter.io.out
  }
  /** Valid-only variant: there is no back-pressure, so the converter is
   *  required (checked by assertion) to accept every phit, and its output
   *  is drained unconditionally.
   */
  def apply(phit: ValidIO[Phit], flitWidth: Int): ValidIO[Flit] = {
    val converter = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
    converter.io.in.bits := phit.bits
    converter.io.in.valid := phit.valid
    when (phit.valid) { assert(converter.io.in.ready) }
    converter.io.out.ready := true.B
    val result = Wire(Valid(new Flit(flitWidth)))
    result.valid := converter.io.out.valid
    result.bits := converter.io.out.bits
    result
  }
}
/** Time-multiplexes `channels` phit streams onto one link. Each flit's worth
 *  of phits is prefixed with `headerBeats` phits encoding the source channel
 *  index, followed by `flitBeats` phits of payload.
 */
class PhitArbiter(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
  override def desiredName = s"PhitArbiter_p${phitWidth}_f${flitWidth}_n${channels}"
  val io = IO(new Bundle {
    val in = Flipped(Vec(channels, Decoupled(new Phit(phitWidth))))
    val out = Decoupled(new Phit(phitWidth))
  })
  if (channels == 1) {
    // Single channel: no header needed, pass straight through.
    io.out <> io.in(0)
  } else {
    val headerWidth = log2Ceil(channels)
    val headerBeats = (headerWidth - 1) / phitWidth + 1
    val flitBeats = (flitWidth - 1) / phitWidth + 1
    val beats = headerBeats + flitBeats
    val beat = RegInit(0.U(log2Ceil(beats).W))
    // Channel is picked by priority on beat 0 and held for the whole packet.
    val chosen_reg = Reg(UInt(headerWidth.W))
    val chosen_prio = PriorityEncoder(io.in.map(_.valid))
    val chosen = Mux(beat === 0.U, chosen_prio, chosen_reg)
    val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
    io.out.valid := VecInit(io.in.map(_.valid))(chosen)
    // Header beats emit the channel index; payload beats forward the chosen input.
    io.out.bits.phit := Mux(beat < headerBeats.U,
      chosen.asTypeOf(Vec(headerBeats, UInt(phitWidth.W)))(header_idx),
      VecInit(io.in.map(_.bits.phit))(chosen))
    for (i <- 0 until channels) {
      // Inputs drain only during payload beats, and only the latched channel.
      io.in(i).ready := io.out.ready && beat >= headerBeats.U && chosen_reg === i.U
    }
    when (io.out.fire) {
      beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
      when (beat === 0.U) { chosen_reg := chosen_prio }
    }
  }
}
/** Inverse of [[PhitArbiter]]: reads the channel-index header phits from the
 *  incoming stream, then routes the following payload phits to the selected
 *  output channel.
 */
class PhitDemux(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
  override def desiredName = s"PhitDemux_p${phitWidth}_f${flitWidth}_n${channels}"
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(new Phit(phitWidth)))
    val out = Vec(channels, Decoupled(new Phit(phitWidth)))
  })
  if (channels == 1) {
    // Single channel: no header on the wire, pass straight through.
    io.out(0) <> io.in
  } else {
    val headerWidth = log2Ceil(channels)
    val headerBeats = (headerWidth - 1) / phitWidth + 1
    val flitBeats = (flitWidth - 1) / phitWidth + 1
    val beats = headerBeats + flitBeats
    val beat = RegInit(0.U(log2Ceil(beats).W))
    // Header phits accumulate here; the low bits select the output channel.
    val channel_vec = Reg(Vec(headerBeats, UInt(phitWidth.W)))
    val channel = channel_vec.asUInt(log2Ceil(channels)-1,0)
    val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
    // Header beats are always accepted; payload beats need the target's ready.
    io.in.ready := beat < headerBeats.U || VecInit(io.out.map(_.ready))(channel)
    for (c <- 0 until channels) {
      io.out(c).valid := io.in.valid && beat >= headerBeats.U && channel === c.U
      io.out(c).bits.phit := io.in.bits.phit
    }
    when (io.in.fire) {
      beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
      when (beat < headerBeats.U) {
        channel_vec(header_idx) := io.in.bits.phit
      }
    }
  }
}
/** Converts a ready/valid flit stream into a credited stream. `credits`
 *  counts flits outstanding in the far-end buffer of size `bufferSz`; flits
 *  are only sent while credit remains. Returned credits arrive on
 *  `io.credit`, whose flit payload encodes (count - 1) credits to release.
 */
class DecoupledFlitToCreditedFlit(flitWidth: Int, bufferSz: Int) extends Module {
  override def desiredName = s"DecoupledFlitToCreditedFlit_f${flitWidth}_b${bufferSz}"
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(new Flit(flitWidth)))
    val out = Decoupled(new Flit(flitWidth))
    val credit = Flipped(Decoupled(new Flit(flitWidth)))
  })
  val creditWidth = log2Ceil(bufferSz)
  require(creditWidth <= flitWidth)
  val credits = RegInit(0.U((creditWidth+1).W))
  val credit_incr = io.out.fire
  val credit_decr = io.credit.fire
  when (credit_incr || credit_decr) {
    // One credit consumed per flit sent; a credit message returns
    // (payload + 1) credits in a single cycle.
    credits := credits + credit_incr - Mux(io.credit.valid, io.credit.bits.flit +& 1.U, 0.U)
  }
  // Throttle whenever the far-end buffer would overflow.
  io.out.valid := io.in.valid && credits < bufferSz.U
  io.out.bits.flit := io.in.bits.flit
  io.in.ready := io.out.ready && credits < bufferSz.U
  // Credit returns are always accepted.
  io.credit.ready := true.B
}
/** Receiving side of the credited link: buffers incoming flits (always
 *  accepted — the sender is trusted to respect credits, checked by the
 *  assertion below) and returns credits as flits whose payload encodes
 *  (count - 1) freed buffer entries.
 */
class CreditedFlitToDecoupledFlit(flitWidth: Int, bufferSz: Int) extends Module {
  override def desiredName = s"CreditedFlitToDecoupledFlit_f${flitWidth}_b${bufferSz}"
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(new Flit(flitWidth)))
    val out = Decoupled(new Flit(flitWidth))
    val credit = Decoupled(new Flit(flitWidth))
  })
  val creditWidth = log2Ceil(bufferSz)
  require(creditWidth <= flitWidth)
  val buffer = Module(new Queue(new Flit(flitWidth), bufferSz))
  val credits = RegInit(0.U((creditWidth+1).W))
  val credit_incr = buffer.io.deq.fire
  val credit_decr = io.credit.fire
  when (credit_incr || credit_decr) {
    // Accumulate freed entries; restart the count once a credit flit is sent.
    credits := credit_incr + Mux(credit_decr, 0.U, credits)
  }
  buffer.io.enq.valid := io.in.valid
  buffer.io.enq.bits := io.in.bits
  // Flow control is by credits, not ready/valid: enqueue must always succeed.
  io.in.ready := true.B
  when (io.in.valid) { assert(buffer.io.enq.ready) }
  io.out <> buffer.io.deq
  // Send a credit whenever any entries have been freed since the last one.
  io.credit.valid := credits =/= 0.U
  io.credit.bits.flit := credits - 1.U
} | module GenericDeserializer_TLBeatw88_f32_TestHarness_UNIQUIFIED(
  // Reassembles an 88-bit TLBeat (86-bit payload + head + tail) from three
  // 32-bit flits; generated from the Chisel GenericDeserializer above.
  input clock,
  input reset,
  output io_in_ready,
  input io_in_valid,
  input [31:0] io_in_bits_flit,
  input io_out_ready,
  output io_out_valid,
  output [85:0] io_out_bits_payload,
  output io_out_bits_head,
  output io_out_bits_tail
);
  // Flits 0 and 1 of the 3-beat message are buffered; the final flit is
  // forwarded combinationally into the payload (see assigns below).
  reg [31:0] data_0;
  reg [31:0] data_1;
  reg [1:0] beat;
  // Accept a flit whenever not on the final beat, or when downstream is ready.
  wire io_in_ready_0 = io_out_ready | beat != 2'h2;
  wire _beat_T = beat == 2'h2;
  wire _GEN = io_in_ready_0 & io_in_valid;
  wire _GEN_0 = beat == 2'h2;
  always @(posedge clock) begin
    // Capture the beat-0 / beat-1 flits on input fire (beat[0] selects which).
    if (~_GEN | _GEN_0 | beat[0]) begin
    end
    else
      data_0 <= io_in_bits_flit;
    if (~_GEN | _GEN_0 | ~(beat[0])) begin
    end
    else
      data_1 <= io_in_bits_flit;
    if (reset)
      beat <= 2'h0;
    else if (_GEN)
      beat <= _beat_T ? 2'h0 : beat + 2'h1;
  end
  assign io_in_ready = io_in_ready_0;
  // Output is valid only while the final flit is being presented.
  assign io_out_valid = io_in_valid & _beat_T;
  // Payload = {final flit[23:0], beat 1, beat 0[31:2]}; the low 2 bits of
  // beat 0 are the head/tail flags.
  assign io_out_bits_payload = {io_in_bits_flit[23:0], data_1, data_0[31:2]};
  assign io_out_bits_head = data_0[1];
  assign io_out_bits_tail = data_0[0];
endmodule
Generate the Verilog code corresponding to this Chisel code // See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tile
import chisel3._
import chisel3.util._
import chisel3.{DontCare, WireInit, withClock, withReset}
import chisel3.experimental.SourceInfo
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.rocket._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property
/** Configuration parameters for the FPU.
 *  @param minFLen     narrowest supported FP format width (16 adds half precision)
 *  @param fLen        widest supported FP format width (32 single, 64 double)
 *  @param divSqrt     instantiate hardware divide/square-root support
 *  @param sfmaLatency single-precision FMA pipeline latency, in cycles
 *  @param dfmaLatency double-precision FMA pipeline latency, in cycles
 *  @param fpmuLatency FP move/compare/convert pipeline latency, in cycles
 *  @param ifpuLatency int-to-FP pipeline latency, in cycles
 */
case class FPUParams(
  minFLen: Int = 32,
  fLen: Int = 64,
  divSqrt: Boolean = true,
  sfmaLatency: Int = 3,
  dfmaLatency: Int = 4,
  fpmuLatency: Int = 2,
  ifpuLatency: Int = 2
)
/** Field widths shared across the FPU interfaces. */
object FPConstants
{
  val RM_SZ = 3     // rounding-mode field width, in bits
  val FLAGS_SZ = 5  // exception-flags (fflags) field width, in bits
}
/** Per-instruction FPU control signals, produced by [[FPUDecoder]].
 *  Field meanings follow the decode tables in [[FPUDecoder]]; the field
 *  order here defines the Bundle's bit layout, so do not reorder.
 */
trait HasFPUCtrlSigs {
  val ldst = Bool()           // FP load/store (set for FLH/FLW/FLD, FSH/FSW/FSD)
  val wen = Bool()            // writes the FP register file
  val ren1 = Bool()           // reads FP operand 1
  val ren2 = Bool()           // reads FP operand 2
  val ren3 = Bool()           // reads FP operand 3 (FMA family)
  val swap12 = Bool()         // swap operands 1 and 2
  val swap23 = Bool()         // swap operands 2 and 3
  val typeTagIn = UInt(2.W)   // input format tag (H/S/D/I per the decode tables)
  val typeTagOut = UInt(2.W)  // output format tag (H/S/D/I per the decode tables)
  val fromint = Bool()        // integer-to-FP transfer/convert
  val toint = Bool()          // FP-to-integer transfer/compare/classify
  val fastpipe = Bool()       // short pipe (sign-inject, min/max, FP-FP convert)
  val sma = Bool()            // NOTE(review): field name placeholder — not present; see fma below
  val fma = Bool()            // uses the FMA unit (add/sub/mul/fused ops)
  val div = Bool()            // uses the divider
  val sqrt = Bool()           // uses the square-root unit
  val wflags = Bool()         // may update the exception flags
  val vec = Bool()            // vector-unit operation (VFMV_F_S)
}
class FPUCtrlSigs extends Bundle with HasFPUCtrlSigs
/** Decodes an FP instruction word into [[FPUCtrlSigs]].
 *  Each table row lists, in order, the fields of the control bundle:
 *  ldst, wen, ren1, ren2, ren3, swap12, swap23, typeTagIn, typeTagOut,
 *  fromint, toint, fastpipe, fma, div, sqrt, wflags, vec
 *  (see the `sigs` Seq at the bottom of this class).
 */
class FPUDecoder(implicit p: Parameters) extends FPUModule()(p) {
  val io = IO(new Bundle {
    val inst = Input(Bits(32.W))
    val sigs = Output(new FPUCtrlSigs())
  })
  // 2-bit don't-care for the type-tag columns.
  private val X2 = BitPat.dontCare(2)
  val default = List(X,X,X,X,X,X,X,X2,X2,X,X,X,X,X,X,X,N)
  // Half-precision (Zfh) instructions.
  val h: Array[(BitPat, List[BitPat])] =
    Array(FLH -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
      FSH -> List(Y,N,N,Y,N,Y,X, I, H,N,Y,N,N,N,N,N,N),
      FMV_H_X -> List(N,Y,N,N,N,X,X, H, I,Y,N,N,N,N,N,N,N),
      FCVT_H_W -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
      FCVT_H_WU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
      FCVT_H_L -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
      FCVT_H_LU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
      FMV_X_H -> List(N,N,Y,N,N,N,X, I, H,N,Y,N,N,N,N,N,N),
      FCLASS_H -> List(N,N,Y,N,N,N,X, H, H,N,Y,N,N,N,N,N,N),
      FCVT_W_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
      FCVT_WU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
      FCVT_L_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
      FCVT_LU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
      FCVT_S_H -> List(N,Y,Y,N,N,N,X, H, S,N,N,Y,N,N,N,Y,N),
      FCVT_H_S -> List(N,Y,Y,N,N,N,X, S, H,N,N,Y,N,N,N,Y,N),
      FEQ_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
      FLT_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
      FLE_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
      FSGNJ_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
      FSGNJN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
      FSGNJX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
      FMIN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
      FMAX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
      FADD_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
      FSUB_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
      FMUL_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,Y,N,N,Y,N),
      FMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
      FMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
      FNMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
      FNMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
      FDIV_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,N,Y,N,Y,N),
      FSQRT_H -> List(N,Y,Y,N,N,N,X, H, H,N,N,N,N,N,Y,Y,N))
  // Single-precision (F) instructions.
  val f: Array[(BitPat, List[BitPat])] =
    Array(FLW -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
      FSW -> List(Y,N,N,Y,N,Y,X, I, S,N,Y,N,N,N,N,N,N),
      FMV_W_X -> List(N,Y,N,N,N,X,X, S, I,Y,N,N,N,N,N,N,N),
      FCVT_S_W -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
      FCVT_S_WU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
      FCVT_S_L -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
      FCVT_S_LU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
      FMV_X_W -> List(N,N,Y,N,N,N,X, I, S,N,Y,N,N,N,N,N,N),
      FCLASS_S -> List(N,N,Y,N,N,N,X, S, S,N,Y,N,N,N,N,N,N),
      FCVT_W_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
      FCVT_WU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
      FCVT_L_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
      FCVT_LU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
      FEQ_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
      FLT_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
      FLE_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
      FSGNJ_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
      FSGNJN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
      FSGNJX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
      FMIN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
      FMAX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
      FADD_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
      FSUB_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
      FMUL_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,Y,N,N,Y,N),
      FMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
      FMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
      FNMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
      FNMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
      FDIV_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,N,Y,N,Y,N),
      FSQRT_S -> List(N,Y,Y,N,N,N,X, S, S,N,N,N,N,N,Y,Y,N))
  // Double-precision (D) instructions, including S<->D conversions.
  val d: Array[(BitPat, List[BitPat])] =
    Array(FLD -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
      FSD -> List(Y,N,N,Y,N,Y,X, I, D,N,Y,N,N,N,N,N,N),
      FMV_D_X -> List(N,Y,N,N,N,X,X, D, I,Y,N,N,N,N,N,N,N),
      FCVT_D_W -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
      FCVT_D_WU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
      FCVT_D_L -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
      FCVT_D_LU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
      FMV_X_D -> List(N,N,Y,N,N,N,X, I, D,N,Y,N,N,N,N,N,N),
      FCLASS_D -> List(N,N,Y,N,N,N,X, D, D,N,Y,N,N,N,N,N,N),
      FCVT_W_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
      FCVT_WU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
      FCVT_L_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
      FCVT_LU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
      FCVT_S_D -> List(N,Y,Y,N,N,N,X, D, S,N,N,Y,N,N,N,Y,N),
      FCVT_D_S -> List(N,Y,Y,N,N,N,X, S, D,N,N,Y,N,N,N,Y,N),
      FEQ_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
      FLT_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
      FLE_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
      FSGNJ_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
      FSGNJN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
      FSGNJX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
      FMIN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
      FMAX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
      FADD_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
      FSUB_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
      FMUL_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,Y,N,N,Y,N),
      FMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
      FMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
      FNMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
      FNMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
      FDIV_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,N,Y,N,Y,N),
      FSQRT_D -> List(N,Y,Y,N,N,N,X, D, D,N,N,N,N,N,Y,Y,N))
  // Half <-> double conversions (only present when both Zfh and D exist).
  val fcvt_hd: Array[(BitPat, List[BitPat])] =
    Array(FCVT_H_D -> List(N,Y,Y,N,N,N,X, D, H,N,N,Y,N,N,N,Y,N),
      FCVT_D_H -> List(N,Y,Y,N,N,N,X, H, D,N,N,Y,N,N,N,Y,N))
  // Vector move-to-scalar (only when the vector unit is present).
  val vfmv_f_s: Array[(BitPat, List[BitPat])] =
    Array(VFMV_F_S -> List(N,Y,N,N,N,N,X,X2,X2,N,N,N,N,N,N,N,Y))
  // Select the tables matching the configured format range.
  val insns = ((minFLen, fLen) match {
    case (32, 32) => f
    case (16, 32) => h ++ f
    case (32, 64) => f ++ d
    case (16, 64) => h ++ f ++ d ++ fcvt_hd
    case other => throw new Exception(s"minFLen = ${minFLen} & fLen = ${fLen} is an unsupported configuration")
  }) ++ (if (usingVector) vfmv_f_s else Array[(BitPat, List[BitPat])]())
  val decoder = DecodeLogic(io.inst, default, insns)
  val s = io.sigs
  // Order here must match the column order of the tables above.
  val sigs = Seq(s.ldst, s.wen, s.ren1, s.ren2, s.ren3, s.swap12,
    s.swap23, s.typeTagIn, s.typeTagOut, s.fromint, s.toint,
    s.fastpipe, s.fma, s.div, s.sqrt, s.wflags, s.vec)
  sigs zip decoder map {case(s,d) => s := d}
}
/** Interface between the core pipeline and the FPU.
  *
  * Carries instruction/operand delivery into the FPU, long-latency
  * (load/vector) writeback responses, integer-side results, fcsr
  * rounding-mode/flags plumbing, and the FP scoreboard handshake.
  * Directions are from the FPU's perspective (Input = driven by the core).
  */
class FPUCoreIO(implicit p: Parameters) extends CoreBundle()(p) {
val hartid = Input(UInt(hartIdLen.W)) // hart ID, used for trace/DebugROB monitoring
val time = Input(UInt(xLen.W)) // time counter, used for trace monitoring
val inst = Input(Bits(32.W)) // instruction bits, decoded inside the FPU
val fromint_data = Input(Bits(xLen.W)) // integer operand for int->FP moves/conversions
val fcsr_rm = Input(Bits(FPConstants.RM_SZ.W)) // dynamic rounding mode from fcsr
val fcsr_flags = Valid(Bits(FPConstants.FLAGS_SZ.W)) // accrued exception flags to write back to fcsr
val v_sew = Input(UInt(3.W)) // vector element width; selects D vs S for vector FP reads
val store_data = Output(Bits(fLen.W)) // FP store data, IEEE encoding
val toint_data = Output(Bits(xLen.W)) // FP->int result (moves, conversions, compares)
val ll_resp_val = Input(Bool()) // long-latency (load/vector) writeback valid
val ll_resp_type = Input(Bits(3.W)) // size/type tag of the long-latency response
val ll_resp_tag = Input(UInt(5.W)) // destination f-register of the response
val ll_resp_data = Input(Bits(fLen.W)) // response data, IEEE encoding
val valid = Input(Bool()) // new FP instruction valid in decode
val fcsr_rdy = Output(Bool()) // no in-flight ops that may still update fflags
val nack_mem = Output(Bool()) // structural hazard: core must replay the MEM-stage op
val illegal_rm = Output(Bool()) // rounding-mode field is reserved/unsupported
val killx = Input(Bool()) // kill the EX-stage instruction
val killm = Input(Bool()) // kill the MEM-stage instruction
val dec = Output(new FPUCtrlSigs()) // decoded control signals for the ID-stage instruction
val sboard_set = Output(Bool()) // set scoreboard bit for a long-latency writer
val sboard_clr = Output(Bool()) // clear scoreboard bit on writeback
val sboard_clra = Output(UInt(5.W)) // f-register whose scoreboard bit to clear
val keep_clock_enabled = Input(Bool()) // chicken bit: inhibit FPU clock gating
}
/** Full FPU interface: the core-facing signals plus a coprocessor-style
  * request/response port (used e.g. by an attached accelerator). */
class FPUIO(implicit p: Parameters) extends FPUCoreIO ()(p) {
val cp_req = Flipped(Decoupled(new FPInput())) //cp doesn't pay attn to kill sigs
val cp_resp = Decoupled(new FPResult()) // result returned to the coprocessor
}
/** Result of an FP functional unit: the data in hardfloat's recoded
  * (fLen+1-bit) format plus the five IEEE exception flags. */
class FPResult(implicit p: Parameters) extends CoreBundle()(p) {
val data = Bits((fLen+1).W) // result, recoded (recFN) encoding
val exc = Bits(FPConstants.FLAGS_SZ.W) // IEEE exception flags
}
/** Request to the int->FP unit: decoded control signals plus rounding mode,
  * integer type selector, and the integer source operand. */
class IntToFPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
val rm = Bits(FPConstants.RM_SZ.W) // rounding mode
val typ = Bits(2.W) // integer type: bit 0 = unsigned, upper bits select width
val in1 = Bits(xLen.W) // integer source operand
}
/** Request to an FP functional unit: decoded control signals, rounding mode,
  * FMA opcode, type/format selectors, and up to three recoded operands. */
class FPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
val rm = Bits(FPConstants.RM_SZ.W) // rounding mode for this op
val fmaCmd = Bits(2.W) // opcode driven onto the hardfloat mul-add `op` input
val typ = Bits(2.W) // integer type selector (for conversions)
val fmt = Bits(2.W) // FP format field from the instruction
val in1 = Bits((fLen+1).W) // operand 1, recoded (recFN) format
val in2 = Bits((fLen+1).W) // operand 2, recoded format
val in3 = Bits((fLen+1).W) // operand 3 (FMA addend), recoded format
}
/** A floating-point format, described by its IEEE exponent and significand widths.
  *
  * Hardware values are mostly kept in hardfloat's "recoded" (recFN) format,
  * which is one bit wider than the IEEE encoding; the top three exponent
  * bits act as a code distinguishing zero/subnormal/normal/inf/NaN.
  *
  * @param exp exponent field width of the IEEE encoding
  * @param sig significand width, including the implicit leading bit
  */
case class FType(exp: Int, sig: Int) {
def ieeeWidth = exp + sig // width of the standard IEEE encoding
def recodedWidth = ieeeWidth + 1 // recoded format carries one extra bit
// canonical quiet NaN, IEEE encoding: sign 0, exponent all-ones, fraction MSB set
def ieeeQNaN = ((BigInt(1) << (ieeeWidth - 1)) - (BigInt(1) << (sig - 2))).U(ieeeWidth.W)
// canonical quiet NaN, recoded encoding: code bits 111, quiet (fraction MSB) bit set
def qNaN = ((BigInt(7) << (exp + sig - 3)) + (BigInt(1) << (sig - 2))).U(recodedWidth.W)
def isNaN(x: UInt) = x(sig + exp - 1, sig + exp - 3).andR // recoded code 111 => NaN
def isSNaN(x: UInt) = isNaN(x) && !x(sig - 2) // signaling NaN: quiet bit clear
/** FCLASS-style 10-bit one-hot classification of a recoded value. */
def classify(x: UInt) = {
val sign = x(sig + exp)
val code = x(exp + sig - 1, exp + sig - 3) // top three recoded-exponent bits
val codeHi = code(2, 1)
val isSpecial = codeHi === 3.U // code 11x: infinity or NaN
// within code 01x, the smallest exponents are still subnormal
val isHighSubnormalIn = x(exp + sig - 3, sig - 1) < 2.U
val isSubnormal = code === 1.U || codeHi === 1.U && isHighSubnormalIn
val isNormal = codeHi === 1.U && !isHighSubnormalIn || codeHi === 2.U
val isZero = code === 0.U
val isInf = isSpecial && !code(0)
val isNaN = code.andR
val isSNaN = isNaN && !x(sig-2)
val isQNaN = isNaN && x(sig-2)
// bit order matches the FCLASS result layout (MSB = quiet NaN)
Cat(isQNaN, isSNaN, isInf && !sign, isNormal && !sign,
isSubnormal && !sign, isZero && !sign, isZero && sign,
isSubnormal && sign, isNormal && sign, isInf && sign)
}
// convert between formats, ignoring rounding, range, NaN
def unsafeConvert(x: UInt, to: FType) = if (this == to) x else {
val sign = x(sig + exp)
val fractIn = x(sig - 2, 0)
val expIn = x(sig + exp - 1, sig - 1)
val fractOut = fractIn << to.sig >> sig // left-align fraction in the destination width
val expOut = {
val expCode = expIn(exp, exp - 2)
// rebias the exponent for the destination format
val commonCase = (expIn + (1 << to.exp).U) - (1 << exp).U
// special codes (zero/inf/NaN region) keep their code bits instead of rebasing
Mux(expCode === 0.U || expCode >= 6.U, Cat(expCode, commonCase(to.exp - 3, 0)), commonCase(to.exp, 0))
}
Cat(sign, expOut, fractOut)
}
// Bundle view of the IEEE encoding: sign / exponent / significand fields
private def ieeeBundle = {
val expWidth = exp
class IEEEBundle extends Bundle {
val sign = Bool()
val exp = UInt(expWidth.W)
val sig = UInt((ieeeWidth-expWidth-1).W)
}
new IEEEBundle
}
def unpackIEEE(x: UInt) = x.asTypeOf(ieeeBundle)
def recode(x: UInt) = hardfloat.recFNFromFN(exp, sig, x) // IEEE -> recoded
def ieee(x: UInt) = hardfloat.fNFromRecFN(exp, sig, x) // recoded -> IEEE
}
/** The floating-point formats known to the FPU, ordered narrowest to widest:
  * half (binary16), single (binary32), and double (binary64). `all` preserves
  * that ordering, which `HasFPUParameters.floatTypes` relies on. */
object FType {
  val H = FType(5, 11)
  val S = FType(8, 24)
  val D = FType(11, 53)
  val all = List(H, S, D)
}
/** FP format parameters plus the NaN-boxing, recoding, and unboxing helpers
  * shared across the FPU datapath.
  *
  * Values narrower than fLen are kept "NaN-boxed" inside the widest recoded
  * format: the upper bits form a NaN whose payload redundantly encodes the
  * narrower value (see box/unbox), so reads at the wrong width are detected
  * and canonicalized to qNaN.
  */
trait HasFPUParameters {
require(fLen == 0 || FType.all.exists(_.ieeeWidth == fLen))
val minFLen: Int // narrowest supported FP format width
val fLen: Int // widest supported FP format width
def xLen: Int
val minXLen = 32
val nIntTypes = log2Ceil(xLen/minXLen) + 1 // number of integer operand widths (e.g. W and D on RV64)
def floatTypes = FType.all.filter(t => minFLen <= t.ieeeWidth && t.ieeeWidth <= fLen) // enabled formats, narrowest first
def minType = floatTypes.head
def maxType = floatTypes.last
def prevType(t: FType) = floatTypes(typeTag(t) - 1) // next-narrower enabled format
def maxExpWidth = maxType.exp
def maxSigWidth = maxType.sig
def typeTag(t: FType) = floatTypes.indexOf(t) // dense index of an enabled format
def typeTagWbOffset = (FType.all.indexOf(minType) + 1).U
def typeTagGroup(t: FType) = (if (floatTypes.contains(t)) typeTag(t) else typeTag(maxType)).U
// typeTag
def H = typeTagGroup(FType.H)
def S = typeTagGroup(FType.S)
def D = typeTagGroup(FType.D)
def I = typeTag(maxType).U
// true iff the bits above a t-wide payload form a valid NaN box
private def isBox(x: UInt, t: FType): Bool = x(t.sig + t.exp, t.sig + t.exp - 4).andR
// NaN-box recoded value y (format yt) into the payload of the recoded NaN x
// (format xt); if x is not NaN, x passes through unchanged
private def box(x: UInt, xt: FType, y: UInt, yt: FType): UInt = {
require(xt.ieeeWidth == 2 * yt.ieeeWidth)
val swizzledNaN = Cat(
x(xt.sig + xt.exp, xt.sig + xt.exp - 3),
x(xt.sig - 2, yt.recodedWidth - 1).andR,
x(xt.sig + xt.exp - 5, xt.sig),
y(yt.recodedWidth - 2),
x(xt.sig - 2, yt.recodedWidth - 1),
y(yt.recodedWidth - 1),
y(yt.recodedWidth - 3, 0))
Mux(xt.isNaN(x), swizzledNaN, x)
}
// implement NaN unboxing for FU inputs
// Interprets x at the width selected by tag (or by exactType, if supplied);
// an improperly boxed value yields the canonical qNaN.
def unbox(x: UInt, tag: UInt, exactType: Option[FType]): UInt = {
val outType = exactType.getOrElse(maxType)
// for each candidate width, produce (box-is-valid, value converted to outType)
def helper(x: UInt, t: FType): Seq[(Bool, UInt)] = {
val prev =
if (t == minType) {
Seq()
} else {
val prevT = prevType(t)
val unswizzled = Cat(
x(prevT.sig + prevT.exp - 1),
x(t.sig - 1),
x(prevT.sig + prevT.exp - 2, 0))
val prev = helper(unswizzled, prevT)
val isbox = isBox(x, t)
prev.map(p => (isbox && p._1, p._2))
}
prev :+ (true.B, t.unsafeConvert(x, outType))
}
val (oks, floats) = helper(x, maxType).unzip
if (exactType.isEmpty || floatTypes.size == 1) {
Mux(oks(tag), floats(tag), maxType.qNaN)
} else {
val t = exactType.get
floats(typeTag(t)) | Mux(oks(typeTag(t)), 0.U, t.qNaN)
}
}
// make sure that the redundant bits in the NaN-boxed encoding are consistent
def consistent(x: UInt): Bool = {
def helper(x: UInt, t: FType): Bool = if (typeTag(t) == 0) true.B else {
val prevT = prevType(t)
val unswizzled = Cat(
x(prevT.sig + prevT.exp - 1),
x(t.sig - 1),
x(prevT.sig + prevT.exp - 2, 0))
val prevOK = !isBox(x, t) || helper(unswizzled, prevT)
val curOK = !t.isNaN(x) || x(t.sig + t.exp - 4) === x(t.sig - 2, prevT.recodedWidth - 1).andR
prevOK && curOK
}
helper(x, maxType)
}
// generate a NaN box from an FU result
def box(x: UInt, t: FType): UInt = {
if (t == maxType) {
x
} else {
val nt = floatTypes(typeTag(t) + 1)
val bigger = box(((BigInt(1) << nt.recodedWidth)-1).U, nt, x, t)
bigger | ((BigInt(1) << maxType.recodedWidth) - (BigInt(1) << nt.recodedWidth)).U
}
}
// generate a NaN box from an FU result
def box(x: UInt, tag: UInt): UInt = {
val opts = floatTypes.map(t => box(x, t))
opts(tag)
}
// zap bits that hardfloat thinks are don't-cares, but we do care about
def sanitizeNaN(x: UInt, t: FType): UInt = {
if (typeTag(t) == 0) {
x
} else {
val maskedNaN = x & ~((BigInt(1) << (t.sig-1)) | (BigInt(1) << (t.sig+t.exp-4))).U(t.recodedWidth.W)
Mux(t.isNaN(x), maskedNaN, x)
}
}
// implement NaN boxing and recoding for FL*/fmv.*.x
def recode(x: UInt, tag: UInt): UInt = {
// recode at every width, boxing each narrower result into the next wider one
def helper(x: UInt, t: FType): UInt = {
if (typeTag(t) == 0) {
t.recode(x)
} else {
val prevT = prevType(t)
box(t.recode(x), t, helper(x, prevT), prevT)
}
}
// fill MSBs of subword loads to emulate a wider load of a NaN-boxed value
val boxes = floatTypes.map(t => ((BigInt(1) << maxType.ieeeWidth) - (BigInt(1) << t.ieeeWidth)).U)
helper(boxes(tag) | x, maxType)
}
// implement NaN unboxing and un-recoding for FS*/fmv.x.*
def ieee(x: UInt, t: FType = maxType): UInt = {
if (typeTag(t) == 0) {
t.ieee(x)
} else {
val unrecoded = t.ieee(x)
val prevT = prevType(t)
// recover the next-narrower value swizzled into the NaN payload
val prevRecoded = Cat(
x(prevT.recodedWidth-2),
x(t.sig-1),
x(prevT.recodedWidth-3, 0))
val prevUnrecoded = ieee(prevRecoded, prevT)
Cat(unrecoded >> prevT.ieeeWidth, Mux(t.isNaN(x), prevUnrecoded, unrecoded(prevT.ieeeWidth-1, 0)))
}
}
}
/** Base class for FPU functional units: a Module with core and FPU parameters in scope. */
abstract class FPUModule(implicit val p: Parameters) extends Module with HasCoreParameters with HasFPUParameters
/** FP-to-integer unit: handles fmv.x-style moves, classification, FP
  * comparisons (used via wflags), and FP->int conversions; also produces
  * FP store data. The request is registered, so outputs are valid one
  * cycle after io.in fires. */
class FPToInt(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
class Output extends Bundle {
val in = new FPInput // registered request, forwarded for downstream consumers (e.g. div/sqrt)
val lt = Bool() // a < b, consumed by FPToFP for fmin/fmax selection
val store = Bits(fLen.W) // IEEE-encoded store data
val toint = Bits(xLen.W) // integer result
val exc = Bits(FPConstants.FLAGS_SZ.W) // exception flags
}
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new Output)
})
val in = RegEnable(io.in.bits, io.in.valid)
val valid = RegNext(io.in.valid)
// shared comparator over the recoded operands
val dcmp = Module(new hardfloat.CompareRecFN(maxExpWidth, maxSigWidth))
dcmp.io.a := in.in1
dcmp.io.b := in.in2
dcmp.io.signaling := !in.rm(1)
val tag = in.typeTagOut
// move result: IEEE value replicated across the width (H is sign-extended to 32 bits)
val toint_ieee = (floatTypes.map(t => if (t == FType.H) Fill(maxType.ieeeWidth / minXLen, ieee(in.in1)(15, 0).sextTo(minXLen))
else Fill(maxType.ieeeWidth / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
val toint = WireDefault(toint_ieee)
val intType = WireDefault(in.fmt(0))
io.out.bits.store := (floatTypes.map(t => Fill(fLen / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
io.out.bits.toint := ((0 until nIntTypes).map(i => toint((minXLen << i) - 1, 0).sextTo(xLen)): Seq[UInt])(intType)
io.out.bits.exc := 0.U
when (in.rm(0)) { // classify: low 32 bits replaced by the classification mask
val classify_out = (floatTypes.map(t => t.classify(maxType.unsafeConvert(in.in1, t))): Seq[UInt])(tag)
toint := classify_out | (toint_ieee >> minXLen << minXLen)
intType := false.B
}
when (in.wflags) { // feq/flt/fle, fcvt
toint := (~in.rm & Cat(dcmp.io.lt, dcmp.io.eq)).orR | (toint_ieee >> minXLen << minXLen)
io.out.bits.exc := dcmp.io.exceptionFlags
intType := false.B
when (!in.ren2) { // fcvt
val cvtType = in.typ.extract(log2Ceil(nIntTypes), 1) // destination integer width selector
intType := cvtType
val conv = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, xLen))
conv.io.in := in.in1
conv.io.roundingMode := in.rm
conv.io.signedOut := ~in.typ(0) // typ(0) selects unsigned
toint := conv.io.out
io.out.bits.exc := Cat(conv.io.intExceptionFlags(2, 1).orR, 0.U(3.W), conv.io.intExceptionFlags(0))
// narrower destinations reuse the xLen conversion but check overflow at
// the narrow width and substitute the saturated value when invalid
for (i <- 0 until nIntTypes-1) {
val w = minXLen << i
when (cvtType === i.U) {
val narrow = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, w))
narrow.io.in := in.in1
narrow.io.roundingMode := in.rm
narrow.io.signedOut := ~in.typ(0)
val excSign = in.in1(maxExpWidth + maxSigWidth) && !maxType.isNaN(in.in1)
val excOut = Cat(conv.io.signedOut === excSign, Fill(w-1, !excSign)) // saturated value
val invalid = conv.io.intExceptionFlags(2) || narrow.io.intExceptionFlags(1)
when (invalid) { toint := Cat(conv.io.out >> w, excOut) }
io.out.bits.exc := Cat(invalid, 0.U(3.W), !invalid && conv.io.intExceptionFlags(0))
}
}
}
}
io.out.valid := valid
// order -0 below +0 (the comparator reports them equal)
io.out.bits.lt := dcmp.io.lt || (dcmp.io.a.asSInt < 0.S && dcmp.io.b.asSInt >= 0.S)
io.out.bits.in := in
}
/** Integer-to-FP unit: bitwise moves (recode only) and int->FP conversions
  * (wflags set), pipelined with `latency` total stages. */
class IntToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
val io = IO(new Bundle {
val in = Flipped(Valid(new IntToFPInput))
val out = Valid(new FPResult)
})
val in = Pipe(io.in) // one input register stage
val tag = in.bits.typeTagIn
val mux = Wire(new FPResult)
mux.exc := 0.U
mux.data := recode(in.bits.in1, tag) // default: bitwise move, recoded and NaN-boxed
val intValue = {
// sign- or zero-extend a sub-xLen integer operand to xLen
val res = WireDefault(in.bits.in1.asSInt)
for (i <- 0 until nIntTypes-1) {
val smallInt = in.bits.in1((minXLen << i) - 1, 0)
when (in.bits.typ.extract(log2Ceil(nIntTypes), 1) === i.U) {
res := Mux(in.bits.typ(0), smallInt.zext, smallInt.asSInt) // typ(0): unsigned
}
}
res.asUInt
}
when (in.bits.wflags) { // fcvt
// could be improved for RVD/RVQ with a single variable-position rounding
// unit, rather than N fixed-position ones
val i2fResults = for (t <- floatTypes) yield {
val i2f = Module(new hardfloat.INToRecFN(xLen, t.exp, t.sig))
i2f.io.signedIn := ~in.bits.typ(0)
i2f.io.in := intValue
i2f.io.roundingMode := in.bits.rm
i2f.io.detectTininess := hardfloat.consts.tininess_afterRounding
(sanitizeNaN(i2f.io.out, t), i2f.io.exceptionFlags)
}
val (data, exc) = i2fResults.unzip
// pad narrower results up to the widest width before selection by tag
val dataPadded = data.init.map(d => Cat(data.last >> d.getWidth, d)) :+ data.last
mux.data := dataPadded(tag)
mux.exc := exc(tag)
}
io.out <> Pipe(in.valid, mux, latency-1) // remaining stages after the input Pipe
}
/** FP-to-FP unit: sign injection (fsgnj/fsgnjn/fsgnjx), fmin/fmax, and
  * FP->FP format conversions, pipelined with `latency` total stages. */
class FPToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
val lt = Input(Bool()) // from FPToInt
})
val in = Pipe(io.in)
// sign injection: rm bits select fsgnjx / fsgnjn / fsgnj
val signNum = Mux(in.bits.rm(1), in.bits.in1 ^ in.bits.in2, Mux(in.bits.rm(0), ~in.bits.in2, in.bits.in2))
val fsgnj = Cat(signNum(fLen), in.bits.in1(fLen-1, 0))
val fsgnjMux = Wire(new FPResult)
fsgnjMux.exc := 0.U
fsgnjMux.data := fsgnj
when (in.bits.wflags) { // fmin/fmax
val isnan1 = maxType.isNaN(in.bits.in1)
val isnan2 = maxType.isNaN(in.bits.in2)
val isInvalid = maxType.isSNaN(in.bits.in1) || maxType.isSNaN(in.bits.in2)
val isNaNOut = isnan1 && isnan2 // both NaN: return canonical qNaN
val isLHS = isnan2 || in.bits.rm(0) =/= io.lt && !isnan1 // rm(0) flips min vs max
fsgnjMux.exc := isInvalid << 4 // NV on signaling-NaN input
fsgnjMux.data := Mux(isNaNOut, maxType.qNaN, Mux(isLHS, in.bits.in1, in.bits.in2))
}
val inTag = in.bits.typeTagIn
val outTag = in.bits.typeTagOut
val mux = WireDefault(fsgnjMux)
// NaN-box results narrower than the widest enabled format
for (t <- floatTypes.init) {
when (outTag === typeTag(t).U) {
mux.data := Cat(fsgnjMux.data >> t.recodedWidth, maxType.unsafeConvert(fsgnjMux.data, t))
}
}
when (in.bits.wflags && !in.bits.ren2) { // fcvt
if (floatTypes.size > 1) {
// widening conversions simply canonicalize NaN operands
val widened = Mux(maxType.isNaN(in.bits.in1), maxType.qNaN, in.bits.in1)
fsgnjMux.data := widened
fsgnjMux.exc := maxType.isSNaN(in.bits.in1) << 4
// narrowing conversions require rounding (for RVQ, this could be
// optimized to use a single variable-position rounding unit, rather
// than two fixed-position ones)
for (outType <- floatTypes.init) when (outTag === typeTag(outType).U && ((typeTag(outType) == 0).B || outTag < inTag)) {
val narrower = Module(new hardfloat.RecFNToRecFN(maxType.exp, maxType.sig, outType.exp, outType.sig))
narrower.io.in := in.bits.in1
narrower.io.roundingMode := in.bits.rm
narrower.io.detectTininess := hardfloat.consts.tininess_afterRounding
val narrowed = sanitizeNaN(narrower.io.out, outType)
mux.data := Cat(fsgnjMux.data >> narrowed.getWidth, narrowed)
mux.exc := narrower.io.exceptionFlags
}
}
}
io.out <> Pipe(in.valid, mux, latency-1)
}
/** Pipelined hardfloat fused multiply-add on recoded operands, built from
  * MulAddRecFNToRaw_preMul/postMul plus RoundRawFNToRecFN, with 0, 1, or 2
  * register stages depending on `latency`. */
class MulAddRecFNPipe(latency: Int, expWidth: Int, sigWidth: Int) extends Module
{
override def desiredName = s"MulAddRecFNPipe_l${latency}_e${expWidth}_s${sigWidth}"
require(latency<=2)
val io = IO(new Bundle {
val validin = Input(Bool())
val op = Input(Bits(2.W)) // hardfloat mul-add opcode
val a = Input(Bits((expWidth + sigWidth + 1).W)) // recoded operands
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
val validout = Output(Bool())
})
//------------------------------------------------------------------------
// Multiply-add datapath: preMul decode, integer multiply-add, postMul raw result
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul = Module(new hardfloat.MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul = Module(new hardfloat.MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
val valid_stage0 = Wire(Bool())
val roundingMode_stage0 = Wire(UInt(3.W))
val detectTininess_stage0 = Wire(UInt(1.W))
val postmul_regs = if(latency>0) 1 else 0 // first register stage, present when latency > 0
mulAddRecFNToRaw_postMul.io.fromPreMul := Pipe(io.validin, mulAddRecFNToRaw_preMul.io.toPostMul, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.mulAddResult := Pipe(io.validin, mulAddResult, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.roundingMode := Pipe(io.validin, io.roundingMode, postmul_regs).bits
roundingMode_stage0 := Pipe(io.validin, io.roundingMode, postmul_regs).bits
detectTininess_stage0 := Pipe(io.validin, io.detectTininess, postmul_regs).bits
valid_stage0 := Pipe(io.validin, false.B, postmul_regs).valid
//------------------------------------------------------------------------
// Rounding stage: raw result -> recoded result (second register when latency == 2)
//------------------------------------------------------------------------
val roundRawFNToRecFN = Module(new hardfloat.RoundRawFNToRecFN(expWidth, sigWidth, 0))
val round_regs = if(latency==2) 1 else 0
roundRawFNToRecFN.io.invalidExc := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.invalidExc, round_regs).bits
roundRawFNToRecFN.io.in := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.rawOut, round_regs).bits
roundRawFNToRecFN.io.roundingMode := Pipe(valid_stage0, roundingMode_stage0, round_regs).bits
roundRawFNToRecFN.io.detectTininess := Pipe(valid_stage0, detectTininess_stage0, round_regs).bits
io.validout := Pipe(valid_stage0, false.B, round_regs).valid
roundRawFNToRecFN.io.infiniteExc := false.B
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
/** FMA pipeline for a single format `t` with `latency` total cycles: one
  * input register here, up to two stages inside MulAddRecFNPipe, and any
  * remaining stages in the output Pipe. Add/sub and multiply are mapped
  * onto the FMA by substituting identity operands. */
class FPUFMAPipe(val latency: Int, val t: FType)
(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
override def desiredName = s"FPUFMAPipe_l${latency}_f${t.ieeeWidth}"
require(latency>0)
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
})
val valid = RegNext(io.in.valid)
val in = Reg(new FPInput)
when (io.in.valid) {
val one = 1.U << (t.sig + t.exp - 1) // recoded 1.0 for format t
val zero = (io.in.bits.in1 ^ io.in.bits.in2) & (1.U << (t.sig + t.exp)) // zero carrying the product's sign
val cmd_fma = io.in.bits.ren3 // true three-operand FMA
val cmd_addsub = io.in.bits.swap23 // add/sub, encoded via operand swap
in := io.in.bits
when (cmd_addsub) { in.in2 := one } // add/sub: in1*1 + in3
when (!(cmd_fma || cmd_addsub)) { in.in3 := zero } // multiply: in1*in2 + (signed) 0
}
val fma = Module(new MulAddRecFNPipe((latency-1) min 2, t.exp, t.sig))
fma.io.validin := valid
fma.io.op := in.fmaCmd
fma.io.roundingMode := in.rm
fma.io.detectTininess := hardfloat.consts.tininess_afterRounding
fma.io.a := in.in1
fma.io.b := in.in2
fma.io.c := in.in3
val res = Wire(new FPResult)
res.data := sanitizeNaN(fma.io.out, t)
res.exc := fma.io.exceptionFlags
io.out := Pipe(fma.io.validout, res, (latency-3) max 0) // absorb any remaining latency
}
/** Top-level FPU: decodes FP instructions, reads the FP register file,
  * dispatches to the FMA pipes / FPToInt / IntToFP / FPToFP / div-sqrt
  * units, arbitrates writeback, and accrues fflags. Pipeline stages mirror
  * the core: ID -> EX -> MEM -> WB. Most logic lives in a gated-clock
  * domain when clock gating is enabled. */
class FPU(cfg: FPUParams)(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new FPUIO)
val (useClockGating, useDebugROB) = coreParams match {
case r: RocketCoreParams =>
val sz = if (r.debugROB.isDefined) r.debugROB.get.size else 1
(r.clockGate, sz < 1) // NOTE(review): DebugROB used only when configured size < 1 — confirm intent
case _ => (false, false)
}
val clock_en_reg = Reg(Bool())
val clock_en = clock_en_reg || io.cp_req.valid // wake immediately on a coprocessor request
val gated_clock =
if (!useClockGating) clock
else ClockGate(clock, clock_en, "fpu_clock_gate")
// ID stage: decode FP control signals
val fp_decoder = Module(new FPUDecoder)
fp_decoder.io.inst := io.inst
val id_ctrl = WireInit(fp_decoder.io.sigs)
// the vector unit may also read/write the FP register file
coreParams match { case r: RocketCoreParams => r.vector.map(v => {
val v_decode = v.decoder(p) // Only need to get ren1
v_decode.io.inst := io.inst
v_decode.io.vconfig := DontCare // core deals with this
when (v_decode.io.legal && v_decode.io.read_frs1) {
id_ctrl.ren1 := true.B
id_ctrl.swap12 := false.B
id_ctrl.toint := true.B
id_ctrl.typeTagIn := I
id_ctrl.typeTagOut := Mux(io.v_sew === 3.U, D, S)
}
when (v_decode.io.write_frd) { id_ctrl.wen := true.B }
})}
// EX stage registers
val ex_reg_valid = RegNext(io.valid, false.B)
val ex_reg_inst = RegEnable(io.inst, io.valid)
val ex_reg_ctrl = RegEnable(id_ctrl, io.valid)
val ex_ra = List.fill(3)(Reg(UInt())) // register-file read addresses, set up in ID
// load/vector response
val load_wb = RegNext(io.ll_resp_val)
val load_wb_typeTag = RegEnable(io.ll_resp_type(1,0) - typeTagWbOffset, io.ll_resp_val)
val load_wb_data = RegEnable(io.ll_resp_data, io.ll_resp_val)
val load_wb_tag = RegEnable(io.ll_resp_tag, io.ll_resp_val)
class FPUImpl { // entering gated-clock domain
val req_valid = ex_reg_valid || io.cp_req.valid
val ex_cp_valid = io.cp_req.fire
val mem_cp_valid = RegNext(ex_cp_valid, false.B)
val wb_cp_valid = RegNext(mem_cp_valid, false.B)
val mem_reg_valid = RegInit(false.B)
val killm = (io.killm || io.nack_mem) && !mem_cp_valid
// Kill X-stage instruction if M-stage is killed. This prevents it from
// speculatively being sent to the div-sqrt unit, which can cause priority
// inversion for two back-to-back divides, the first of which is killed.
val killx = io.killx || mem_reg_valid && killm
mem_reg_valid := ex_reg_valid && !killx || ex_cp_valid
val mem_reg_inst = RegEnable(ex_reg_inst, ex_reg_valid)
val wb_reg_valid = RegNext(mem_reg_valid && (!killm || mem_cp_valid), false.B)
val cp_ctrl = Wire(new FPUCtrlSigs)
cp_ctrl :<>= io.cp_req.bits.viewAsSupertype(new FPUCtrlSigs)
io.cp_resp.valid := false.B
io.cp_resp.bits.data := 0.U
io.cp_resp.bits.exc := DontCare
// per-stage control signals: coprocessor requests bypass the EX register
val ex_ctrl = Mux(ex_cp_valid, cp_ctrl, ex_reg_ctrl)
val mem_ctrl = RegEnable(ex_ctrl, req_valid)
val wb_ctrl = RegEnable(mem_ctrl, mem_reg_valid)
// CoreMonitorBundle to monitor fp register file writes
val frfWriteBundle = Seq.fill(2)(WireInit(new CoreMonitorBundle(xLen, fLen), DontCare))
frfWriteBundle.foreach { i =>
i.clock := clock
i.reset := reset
i.hartid := io.hartid
i.timer := io.time(31,0)
i.valid := false.B
i.wrenx := false.B
i.wrenf := false.B
i.excpt := false.B
}
// regfile
val regfile = Mem(32, Bits((fLen+1).W))
when (load_wb) {
val wdata = recode(load_wb_data, load_wb_typeTag) // recode+NaN-box the load data
regfile(load_wb_tag) := wdata
assert(consistent(wdata))
if (enableCommitLog)
printf("f%d p%d 0x%x\n", load_wb_tag, load_wb_tag + 32.U, ieee(wdata))
if (useDebugROB)
DebugROB.pushWb(clock, reset, io.hartid, load_wb, load_wb_tag + 32.U, ieee(wdata))
frfWriteBundle(0).wrdst := load_wb_tag
frfWriteBundle(0).wrenf := true.B
frfWriteBundle(0).wrdata := ieee(wdata)
}
val ex_rs = ex_ra.map(a => regfile(a))
// ID stage: route instruction register specifiers to read ports,
// applying the operand swaps demanded by the decoded control signals
when (io.valid) {
when (id_ctrl.ren1) {
when (!id_ctrl.swap12) { ex_ra(0) := io.inst(19,15) }
when (id_ctrl.swap12) { ex_ra(1) := io.inst(19,15) }
}
when (id_ctrl.ren2) {
when (id_ctrl.swap12) { ex_ra(0) := io.inst(24,20) }
when (id_ctrl.swap23) { ex_ra(2) := io.inst(24,20) }
when (!id_ctrl.swap12 && !id_ctrl.swap23) { ex_ra(1) := io.inst(24,20) }
}
when (id_ctrl.ren3) { ex_ra(2) := io.inst(31,27) }
}
// rm field 7 selects the dynamic rounding mode from fcsr
val ex_rm = Mux(ex_reg_inst(14,12) === 7.U, io.fcsr_rm, ex_reg_inst(14,12))
// Build a functional-unit request from EX-stage state (or the coprocessor request)
def fuInput(minT: Option[FType]): FPInput = {
val req = Wire(new FPInput)
val tag = ex_ctrl.typeTagIn
req.viewAsSupertype(new Bundle with HasFPUCtrlSigs) :#= ex_ctrl.viewAsSupertype(new Bundle with HasFPUCtrlSigs)
req.rm := ex_rm
req.in1 := unbox(ex_rs(0), tag, minT)
req.in2 := unbox(ex_rs(1), tag, minT)
req.in3 := unbox(ex_rs(2), tag, minT)
req.typ := ex_reg_inst(21,20)
req.fmt := ex_reg_inst(26,25)
req.fmaCmd := ex_reg_inst(3,2) | (!ex_ctrl.ren3 && ex_reg_inst(27))
when (ex_cp_valid) {
req := io.cp_req.bits
when (io.cp_req.bits.swap12) {
req.in1 := io.cp_req.bits.in2
req.in2 := io.cp_req.bits.in1
}
when (io.cp_req.bits.swap23) {
req.in2 := io.cp_req.bits.in3
req.in3 := io.cp_req.bits.in2
}
}
req
}
// single-precision FMA pipe (always present)
val sfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.S))
sfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === S
sfma.io.in.bits := fuInput(Some(sfma.t))
val fpiu = Module(new FPToInt)
fpiu.io.in.valid := req_valid && (ex_ctrl.toint || ex_ctrl.div || ex_ctrl.sqrt || (ex_ctrl.fastpipe && ex_ctrl.wflags))
fpiu.io.in.bits := fuInput(None)
io.store_data := fpiu.io.out.bits.store
io.toint_data := fpiu.io.out.bits.toint
when(fpiu.io.out.valid && mem_cp_valid && mem_ctrl.toint){
io.cp_resp.bits.data := fpiu.io.out.bits.toint
io.cp_resp.valid := true.B
}
val ifpu = Module(new IntToFP(cfg.ifpuLatency))
ifpu.io.in.valid := req_valid && ex_ctrl.fromint
ifpu.io.in.bits := fpiu.io.in.bits
ifpu.io.in.bits.in1 := Mux(ex_cp_valid, io.cp_req.bits.in1, io.fromint_data)
val fpmu = Module(new FPToFP(cfg.fpmuLatency))
fpmu.io.in.valid := req_valid && ex_ctrl.fastpipe
fpmu.io.in.bits := fpiu.io.in.bits
fpmu.io.lt := fpiu.io.out.bits.lt
// div/sqrt writeback interface, driven inside the cfg.divSqrt block below
val divSqrt_wen = WireDefault(false.B)
val divSqrt_inFlight = WireDefault(false.B)
val divSqrt_waddr = Reg(UInt(5.W))
val divSqrt_cp = Reg(Bool())
val divSqrt_typeTag = Wire(UInt(log2Up(floatTypes.size).W))
val divSqrt_wdata = Wire(UInt((fLen+1).W))
val divSqrt_flags = Wire(UInt(FPConstants.FLAGS_SZ.W))
divSqrt_typeTag := DontCare
divSqrt_wdata := DontCare
divSqrt_flags := DontCare
// writeback arbitration
case class Pipe(p: Module, lat: Int, cond: (FPUCtrlSigs) => Bool, res: FPResult)
val pipes = List(
Pipe(fpmu, fpmu.latency, (c: FPUCtrlSigs) => c.fastpipe, fpmu.io.out.bits),
Pipe(ifpu, ifpu.latency, (c: FPUCtrlSigs) => c.fromint, ifpu.io.out.bits),
Pipe(sfma, sfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === S, sfma.io.out.bits)) ++
(fLen > 32).option({
val dfma = Module(new FPUFMAPipe(cfg.dfmaLatency, FType.D))
dfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === D
dfma.io.in.bits := fuInput(Some(dfma.t))
Pipe(dfma, dfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === D, dfma.io.out.bits)
}) ++
(minFLen == 16).option({
val hfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.H))
hfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === H
hfma.io.in.bits := fuInput(Some(hfma.t))
Pipe(hfma, hfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === H, hfma.io.out.bits)
})
// one-hot mask of the cycle (relative to `offset`) when the selected pipe writes back
def latencyMask(c: FPUCtrlSigs, offset: Int) = {
require(pipes.forall(_.lat >= offset))
pipes.map(p => Mux(p.cond(c), (1 << p.lat-offset).U, 0.U)).reduce(_|_)
}
def pipeid(c: FPUCtrlSigs) = pipes.zipWithIndex.map(p => Mux(p._1.cond(c), p._2.U, 0.U)).reduce(_|_)
val maxLatency = pipes.map(_.lat).max
val memLatencyMask = latencyMask(mem_ctrl, 2)
// per-slot writeback bookkeeping: destination, format, source pipe
class WBInfo extends Bundle {
val rd = UInt(5.W)
val typeTag = UInt(log2Up(floatTypes.size).W)
val cp = Bool()
val pipeid = UInt(log2Ceil(pipes.size).W)
}
val wen = RegInit(0.U((maxLatency-1).W)) // shift register: bit i => writeback in i cycles
val wbInfo = Reg(Vec(maxLatency-1, new WBInfo))
val mem_wen = mem_reg_valid && (mem_ctrl.fma || mem_ctrl.fastpipe || mem_ctrl.fromint)
val write_port_busy = RegEnable(mem_wen && (memLatencyMask & latencyMask(ex_ctrl, 1)).orR || (wen & latencyMask(ex_ctrl, 0)).orR, req_valid)
ccover(mem_reg_valid && write_port_busy, "WB_STRUCTURAL", "structural hazard on writeback")
for (i <- 0 until maxLatency-2) {
when (wen(i+1)) { wbInfo(i) := wbInfo(i+1) }
}
wen := wen >> 1
when (mem_wen) {
when (!killm) {
wen := wen >> 1 | memLatencyMask
}
for (i <- 0 until maxLatency-1) {
when (!write_port_busy && memLatencyMask(i)) {
wbInfo(i).cp := mem_cp_valid
wbInfo(i).typeTag := mem_ctrl.typeTagOut
wbInfo(i).pipeid := pipeid(mem_ctrl)
wbInfo(i).rd := mem_reg_inst(11,7)
}
}
}
// select this cycle's writeback: div/sqrt has priority over the pipes
val waddr = Mux(divSqrt_wen, divSqrt_waddr, wbInfo(0).rd)
val wb_cp = Mux(divSqrt_wen, divSqrt_cp, wbInfo(0).cp)
val wtypeTag = Mux(divSqrt_wen, divSqrt_typeTag, wbInfo(0).typeTag)
val wdata = box(Mux(divSqrt_wen, divSqrt_wdata, (pipes.map(_.res.data): Seq[UInt])(wbInfo(0).pipeid)), wtypeTag)
val wexc = (pipes.map(_.res.exc): Seq[UInt])(wbInfo(0).pipeid)
when ((!wbInfo(0).cp && wen(0)) || divSqrt_wen) {
assert(consistent(wdata))
regfile(waddr) := wdata
if (enableCommitLog) {
printf("f%d p%d 0x%x\n", waddr, waddr + 32.U, ieee(wdata))
}
frfWriteBundle(1).wrdst := waddr
frfWriteBundle(1).wrenf := true.B
frfWriteBundle(1).wrdata := ieee(wdata)
}
if (useDebugROB) {
DebugROB.pushWb(clock, reset, io.hartid, (!wbInfo(0).cp && wen(0)) || divSqrt_wen, waddr + 32.U, ieee(wdata))
}
when (wb_cp && (wen(0) || divSqrt_wen)) {
io.cp_resp.bits.data := wdata
io.cp_resp.valid := true.B
}
assert(!io.cp_req.valid || pipes.forall(_.lat == pipes.head.lat).B,
s"FPU only supports coprocessor if FMA pipes have uniform latency ${pipes.map(_.lat)}")
// Avoid structural hazards and nacking of external requests
// toint responds in the MEM stage, so an incoming toint can induce a structural hazard against inflight FMAs
io.cp_req.ready := !ex_reg_valid && !(cp_ctrl.toint && wen =/= 0.U) && !divSqrt_inFlight
// fflags accrual: toint flags arrive in WB, pipe/div-sqrt flags at writeback
val wb_toint_valid = wb_reg_valid && wb_ctrl.toint
val wb_toint_exc = RegEnable(fpiu.io.out.bits.exc, mem_ctrl.toint)
io.fcsr_flags.valid := wb_toint_valid || divSqrt_wen || wen(0)
io.fcsr_flags.bits :=
Mux(wb_toint_valid, wb_toint_exc, 0.U) |
Mux(divSqrt_wen, divSqrt_flags, 0.U) |
Mux(wen(0), wexc, 0.U)
val divSqrt_write_port_busy = (mem_ctrl.div || mem_ctrl.sqrt) && wen.orR
io.fcsr_rdy := !(ex_reg_valid && ex_ctrl.wflags || mem_reg_valid && mem_ctrl.wflags || wb_reg_valid && wb_ctrl.toint || wen.orR || divSqrt_inFlight)
io.nack_mem := (write_port_busy || divSqrt_write_port_busy || divSqrt_inFlight) && !mem_cp_valid
io.dec <> id_ctrl
// scoreboard interface: long-latency writers (slow pipes, div/sqrt, vector)
def useScoreboard(f: ((Pipe, Int)) => Bool) = pipes.zipWithIndex.filter(_._1.lat > 3).map(x => f(x)).fold(false.B)(_||_)
io.sboard_set := wb_reg_valid && !wb_cp_valid && RegNext(useScoreboard(_._1.cond(mem_ctrl)) || mem_ctrl.div || mem_ctrl.sqrt || mem_ctrl.vec)
io.sboard_clr := !wb_cp_valid && (divSqrt_wen || (wen(0) && useScoreboard(x => wbInfo(0).pipeid === x._2.U)))
io.sboard_clra := waddr
ccover(io.sboard_clr && load_wb, "DUAL_WRITEBACK", "load and FMA writeback on same cycle")
// we don't currently support round-max-magnitude (rm=4)
io.illegal_rm := io.inst(14,12).isOneOf(5.U, 6.U) || io.inst(14,12) === 7.U && io.fcsr_rm >= 5.U
if (cfg.divSqrt) {
val divSqrt_inValid = mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt) && !divSqrt_inFlight
val divSqrt_killed = RegNext(divSqrt_inValid && killm, true.B)
when (divSqrt_inValid) {
divSqrt_waddr := mem_reg_inst(11,7)
divSqrt_cp := mem_cp_valid
}
ccover(divSqrt_inFlight && divSqrt_killed, "DIV_KILLED", "divide killed after issued to divider")
ccover(divSqrt_inFlight && mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt), "DIV_BUSY", "divider structural hazard")
ccover(mem_reg_valid && divSqrt_write_port_busy, "DIV_WB_STRUCTURAL", "structural hazard on division writeback")
// one small iterative divider per enabled format; a kill resets it
for (t <- floatTypes) {
val tag = mem_ctrl.typeTagOut
val divSqrt = withReset(divSqrt_killed) { Module(new hardfloat.DivSqrtRecFN_small(t.exp, t.sig, 0)) }
divSqrt.io.inValid := divSqrt_inValid && tag === typeTag(t).U
divSqrt.io.sqrtOp := mem_ctrl.sqrt
divSqrt.io.a := maxType.unsafeConvert(fpiu.io.out.bits.in.in1, t)
divSqrt.io.b := maxType.unsafeConvert(fpiu.io.out.bits.in.in2, t)
divSqrt.io.roundingMode := fpiu.io.out.bits.in.rm
divSqrt.io.detectTininess := hardfloat.consts.tininess_afterRounding
when (!divSqrt.io.inReady) { divSqrt_inFlight := true.B } // only 1 in flight
when (divSqrt.io.outValid_div || divSqrt.io.outValid_sqrt) {
divSqrt_wen := !divSqrt_killed
divSqrt_wdata := sanitizeNaN(divSqrt.io.out, t)
divSqrt_flags := divSqrt.io.exceptionFlags
divSqrt_typeTag := typeTag(t).U
}
}
when (divSqrt_killed) { divSqrt_inFlight := false.B }
} else {
when (id_ctrl.div || id_ctrl.sqrt) { io.illegal_rm := true.B }
}
// gate the clock
clock_en_reg := !useClockGating.B ||
io.keep_clock_enabled || // chicken bit
io.valid || // ID stage
req_valid || // EX stage
mem_reg_valid || mem_cp_valid || // MEM stage
wb_reg_valid || wb_cp_valid || // WB stage
wen.orR || divSqrt_inFlight || // post-WB stage
io.ll_resp_val // load writeback
} // leaving gated-clock domain
val fpuImpl = withClock (gated_clock) { new FPUImpl }
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"FPU_$label", "Core;;" + desc)
} | module FPUFMAPipe_l3_f16(
// Verilog generated from FPUFMAPipe(latency = 3, t = FType.H): one input
// register here plus two stages inside MulAddRecFNPipe_l2_e5_s11.
// Operands are 65-bit recoded values; only bits [16:0] carry the fp16 payload.
input clock,
input reset,
input io_in_valid,
input io_in_bits_ren3,
input io_in_bits_swap23,
input [2:0] io_in_bits_rm,
input [1:0] io_in_bits_fmaCmd,
input [64:0] io_in_bits_in1,
input [64:0] io_in_bits_in2,
input [64:0] io_in_bits_in3,
output [64:0] io_out_bits_data,
output [4:0] io_out_bits_exc
);
wire [16:0] _fma_io_out;
reg valid; // io_in_valid delayed one cycle
reg [2:0] in_rm; // registered request fields
reg [1:0] in_fmaCmd;
reg [64:0] in_in1;
reg [64:0] in_in2;
reg [64:0] in_in3;
// input stage: capture operands; add/sub (swap23) substitutes recoded 1.0
// for in2, plain multiply substitutes a product-signed zero for in3
always @(posedge clock) begin
valid <= io_in_valid;
if (io_in_valid) begin
in_rm <= io_in_bits_rm;
in_fmaCmd <= io_in_bits_fmaCmd;
in_in1 <= io_in_bits_in1;
in_in2 <= io_in_bits_swap23 ? 65'h8000 : io_in_bits_in2; // 65'h8000 = recoded fp16 1.0
in_in3 <= io_in_bits_ren3 | io_in_bits_swap23 ? io_in_bits_in3 : {48'h0, (io_in_bits_in1[16:0] ^ io_in_bits_in2[16:0]) & 17'h10000};
end
end
MulAddRecFNPipe_l2_e5_s11 fma (
.clock (clock),
.reset (reset),
.io_validin (valid),
.io_op (in_fmaCmd),
.io_a (in_in1[16:0]),
.io_b (in_in2[16:0]),
.io_c (in_in3[16:0]),
.io_roundingMode (in_rm),
.io_out (_fma_io_out),
.io_exceptionFlags (io_out_bits_exc)
);
// result zero-extended back to the 65-bit recoded bus
assign io_out_bits_data = {48'h0, _fma_io_out};
endmodule
Generate the Verilog code corresponding to this Chisel code //******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Rename FreeList
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.exu
import chisel3._
import chisel3.util._
import boom.v3.common._
import boom.v3.util._
import org.chipsalliance.cde.config.Parameters
class RenameFreeList(
val plWidth: Int,
val numPregs: Int,
val numLregs: Int)
(implicit p: Parameters) extends BoomModule
{
private val pregSz = log2Ceil(numPregs)
private val n = numPregs
val io = IO(new BoomBundle()(p) {
// Physical register requests.
val reqs = Input(Vec(plWidth, Bool()))
val alloc_pregs = Output(Vec(plWidth, Valid(UInt(pregSz.W))))
// Pregs returned by the ROB.
val dealloc_pregs = Input(Vec(plWidth, Valid(UInt(pregSz.W))))
// Branch info for starting new allocation lists.
val ren_br_tags = Input(Vec(plWidth, Valid(UInt(brTagSz.W))))
// Mispredict info for recovering speculatively allocated registers.
val brupdate = Input(new BrUpdateInfo)
val debug = new Bundle {
val pipeline_empty = Input(Bool())
val freelist = Output(Bits(numPregs.W))
val isprlist = Output(Bits(numPregs.W))
}
})
// The free list register array and its branch allocation lists.
val free_list = RegInit(UInt(numPregs.W), ~(1.U(numPregs.W)))
val br_alloc_lists = Reg(Vec(maxBrCount, UInt(numPregs.W)))
// Select pregs from the free list.
val sels = SelectFirstN(free_list, plWidth)
val sel_fire = Wire(Vec(plWidth, Bool()))
// Allocations seen by branches in each pipeline slot.
val allocs = io.alloc_pregs map (a => UIntToOH(a.bits))
val alloc_masks = (allocs zip io.reqs).scanRight(0.U(n.W)) { case ((a,r),m) => m | a & Fill(n,r) }
// Masks that modify the freelist array.
val sel_mask = (sels zip sel_fire) map { case (s,f) => s & Fill(n,f) } reduce(_|_)
val br_deallocs = br_alloc_lists(io.brupdate.b2.uop.br_tag) & Fill(n, io.brupdate.b2.mispredict)
val dealloc_mask = io.dealloc_pregs.map(d => UIntToOH(d.bits)(numPregs-1,0) & Fill(n,d.valid)).reduce(_|_) | br_deallocs
val br_slots = VecInit(io.ren_br_tags.map(tag => tag.valid)).asUInt
// Create branch allocation lists.
for (i <- 0 until maxBrCount) {
val list_req = VecInit(io.ren_br_tags.map(tag => UIntToOH(tag.bits)(i))).asUInt & br_slots
val new_list = list_req.orR
br_alloc_lists(i) := Mux(new_list, Mux1H(list_req, alloc_masks.slice(1, plWidth+1)),
br_alloc_lists(i) & ~br_deallocs | alloc_masks(0))
}
// Update the free list.
free_list := (free_list & ~sel_mask | dealloc_mask) & ~(1.U(numPregs.W))
// Pipeline logic | hookup outputs.
for (w <- 0 until plWidth) {
val can_sel = sels(w).orR
val r_valid = RegInit(false.B)
val r_sel = RegEnable(OHToUInt(sels(w)), sel_fire(w))
r_valid := r_valid && !io.reqs(w) || can_sel
sel_fire(w) := (!r_valid || io.reqs(w)) && can_sel
io.alloc_pregs(w).bits := r_sel
io.alloc_pregs(w).valid := r_valid
}
io.debug.freelist := free_list | io.alloc_pregs.map(p => UIntToOH(p.bits) & Fill(n,p.valid)).reduce(_|_)
io.debug.isprlist := 0.U // TODO track commit free list.
assert (!(io.debug.freelist & dealloc_mask).orR, "[freelist] Returning a free physical register.")
assert (!io.debug.pipeline_empty || PopCount(io.debug.freelist) >= (numPregs - numLregs - 1).U,
"[freelist] Leaking physical registers.")
} | module RenameFreeList_1(
input clock,
input reset,
input io_reqs_0,
output io_alloc_pregs_0_valid,
output [5:0] io_alloc_pregs_0_bits,
input io_dealloc_pregs_0_valid,
input [5:0] io_dealloc_pregs_0_bits,
input io_ren_br_tags_0_valid,
input [2:0] io_ren_br_tags_0_bits,
input [2:0] io_brupdate_b2_uop_br_tag,
input io_brupdate_b2_mispredict,
input io_debug_pipeline_empty
);
reg [5:0] r_sel;
reg [47:0] free_list;
reg [47:0] br_alloc_lists_0;
reg [47:0] br_alloc_lists_1;
reg [47:0] br_alloc_lists_2;
reg [47:0] br_alloc_lists_3;
reg [47:0] br_alloc_lists_4;
reg [47:0] br_alloc_lists_5;
reg [47:0] br_alloc_lists_6;
reg [47:0] br_alloc_lists_7;
wire [47:0] sels_0 = free_list[0] ? 48'h1 : free_list[1] ? 48'h2 : free_list[2] ? 48'h4 : free_list[3] ? 48'h8 : free_list[4] ? 48'h10 : free_list[5] ? 48'h20 : free_list[6] ? 48'h40 : free_list[7] ? 48'h80 : free_list[8] ? 48'h100 : free_list[9] ? 48'h200 : free_list[10] ? 48'h400 : free_list[11] ? 48'h800 : free_list[12] ? 48'h1000 : free_list[13] ? 48'h2000 : free_list[14] ? 48'h4000 : free_list[15] ? 48'h8000 : free_list[16] ? 48'h10000 : free_list[17] ? 48'h20000 : free_list[18] ? 48'h40000 : free_list[19] ? 48'h80000 : free_list[20] ? 48'h100000 : free_list[21] ? 48'h200000 : free_list[22] ? 48'h400000 : free_list[23] ? 48'h800000 : free_list[24] ? 48'h1000000 : free_list[25] ? 48'h2000000 : free_list[26] ? 48'h4000000 : free_list[27] ? 48'h8000000 : free_list[28] ? 48'h10000000 : free_list[29] ? 48'h20000000 : free_list[30] ? 48'h40000000 : free_list[31] ? 48'h80000000 : free_list[32] ? 48'h100000000 : free_list[33] ? 48'h200000000 : free_list[34] ? 48'h400000000 : free_list[35] ? 48'h800000000 : free_list[36] ? 48'h1000000000 : free_list[37] ? 48'h2000000000 : free_list[38] ? 48'h4000000000 : free_list[39] ? 48'h8000000000 : free_list[40] ? 48'h10000000000 : free_list[41] ? 48'h20000000000 : free_list[42] ? 48'h40000000000 : free_list[43] ? 48'h80000000000 : free_list[44] ? 48'h100000000000 : free_list[45] ? 48'h200000000000 : free_list[46] ? 48'h400000000000 : {free_list[47], 47'h0};
wire [63:0] allocs_0 = 64'h1 << r_sel;
wire [7:0][47:0] _GEN = {{br_alloc_lists_7}, {br_alloc_lists_6}, {br_alloc_lists_5}, {br_alloc_lists_4}, {br_alloc_lists_3}, {br_alloc_lists_2}, {br_alloc_lists_1}, {br_alloc_lists_0}};
wire [47:0] br_deallocs = _GEN[io_brupdate_b2_uop_br_tag] & {48{io_brupdate_b2_mispredict}};
wire [63:0] _dealloc_mask_T = 64'h1 << io_dealloc_pregs_0_bits;
wire [47:0] dealloc_mask = _dealloc_mask_T[47:0] & {48{io_dealloc_pregs_0_valid}} | br_deallocs;
reg r_valid;
wire sel_fire_0 = (~r_valid | io_reqs_0) & (|sels_0);
wire [30:0] _r_sel_T_1 = {16'h0, sels_0[47:33]} | sels_0[31:1];
wire [14:0] _r_sel_T_3 = _r_sel_T_1[30:16] | _r_sel_T_1[14:0];
wire [6:0] _r_sel_T_5 = _r_sel_T_3[14:8] | _r_sel_T_3[6:0];
wire [2:0] _r_sel_T_7 = _r_sel_T_5[6:4] | _r_sel_T_5[2:0];
wire [47:0] _GEN_0 = allocs_0[47:0] & {48{io_reqs_0}};
always @(posedge clock) begin
if (reset) begin
free_list <= 48'hFFFFFFFFFFFE;
r_valid <= 1'h0;
end
else begin
free_list <= (free_list & ~(sels_0 & {48{sel_fire_0}}) | dealloc_mask) & 48'hFFFFFFFFFFFE;
r_valid <= |{r_valid & ~io_reqs_0, sels_0};
end
br_alloc_lists_0 <= io_ren_br_tags_0_bits == 3'h0 & io_ren_br_tags_0_valid ? 48'h0 : br_alloc_lists_0 & ~br_deallocs | _GEN_0;
br_alloc_lists_1 <= io_ren_br_tags_0_bits == 3'h1 & io_ren_br_tags_0_valid ? 48'h0 : br_alloc_lists_1 & ~br_deallocs | _GEN_0;
br_alloc_lists_2 <= io_ren_br_tags_0_bits == 3'h2 & io_ren_br_tags_0_valid ? 48'h0 : br_alloc_lists_2 & ~br_deallocs | _GEN_0;
br_alloc_lists_3 <= io_ren_br_tags_0_bits == 3'h3 & io_ren_br_tags_0_valid ? 48'h0 : br_alloc_lists_3 & ~br_deallocs | _GEN_0;
br_alloc_lists_4 <= io_ren_br_tags_0_bits == 3'h4 & io_ren_br_tags_0_valid ? 48'h0 : br_alloc_lists_4 & ~br_deallocs | _GEN_0;
br_alloc_lists_5 <= io_ren_br_tags_0_bits == 3'h5 & io_ren_br_tags_0_valid ? 48'h0 : br_alloc_lists_5 & ~br_deallocs | _GEN_0;
br_alloc_lists_6 <= io_ren_br_tags_0_bits == 3'h6 & io_ren_br_tags_0_valid ? 48'h0 : br_alloc_lists_6 & ~br_deallocs | _GEN_0;
br_alloc_lists_7 <= (&io_ren_br_tags_0_bits) & io_ren_br_tags_0_valid ? 48'h0 : br_alloc_lists_7 & ~br_deallocs | _GEN_0;
if (sel_fire_0)
r_sel <= {|(sels_0[47:32]), |(_r_sel_T_1[30:15]), |(_r_sel_T_3[14:7]), |(_r_sel_T_5[6:3]), |(_r_sel_T_7[2:1]), _r_sel_T_7[2] | _r_sel_T_7[0]};
end
assign io_alloc_pregs_0_valid = r_valid;
assign io_alloc_pregs_0_bits = r_sel;
endmodule |
Generate the Verilog code corresponding to this Chisel code //******************************************************************************
// Copyright (c) 2012 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Register Read
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v3.common._
import boom.v3.util._
/**
* Handle the register read and bypass network for the OoO backend
* interfaces with the issue window on the enqueue side, and the execution
* pipelines on the dequeue side.
*
* @param issueWidth total issue width from all issue queues
* @param supportedUnitsArray seq of SupportedFuncUnits classes indicating what the functional units do
* @param numTotalReadPorts number of read ports
* @param numReadPortsArray execution units read port sequence
* @param numTotalBypassPorts number of bypass ports out of the execution units
* @param registerWidth size of register in bits
*/
class RegisterRead(
issueWidth: Int,
supportedUnitsArray: Seq[SupportedFuncUnits],
numTotalReadPorts: Int,
numReadPortsArray: Seq[Int],
// each exe_unit must tell us how many max
// operands it can accept (the sum should equal
// numTotalReadPorts)
numTotalBypassPorts: Int,
numTotalPredBypassPorts: Int,
registerWidth: Int
)(implicit p: Parameters) extends BoomModule
{
val io = IO(new Bundle {
// issued micro-ops
val iss_valids = Input(Vec(issueWidth, Bool()))
val iss_uops = Input(Vec(issueWidth, new MicroOp()))
// interface with register file's read ports
val rf_read_ports = Flipped(Vec(numTotalReadPorts, new RegisterFileReadPortIO(maxPregSz, registerWidth)))
val prf_read_ports = Flipped(Vec(issueWidth, new RegisterFileReadPortIO(log2Ceil(ftqSz), 1)))
val bypass = Input(Vec(numTotalBypassPorts, Valid(new ExeUnitResp(registerWidth))))
val pred_bypass = Input(Vec(numTotalPredBypassPorts, Valid(new ExeUnitResp(1))))
// send micro-ops to the execution pipelines
val exe_reqs = Vec(issueWidth, (new DecoupledIO(new FuncUnitReq(registerWidth))))
val kill = Input(Bool())
val brupdate = Input(new BrUpdateInfo())
})
val rrd_valids = Wire(Vec(issueWidth, Bool()))
val rrd_uops = Wire(Vec(issueWidth, new MicroOp()))
val exe_reg_valids = RegInit(VecInit(Seq.fill(issueWidth) { false.B }))
val exe_reg_uops = Reg(Vec(issueWidth, new MicroOp()))
val exe_reg_rs1_data = Reg(Vec(issueWidth, Bits(registerWidth.W)))
val exe_reg_rs2_data = Reg(Vec(issueWidth, Bits(registerWidth.W)))
val exe_reg_rs3_data = Reg(Vec(issueWidth, Bits(registerWidth.W)))
val exe_reg_pred_data = Reg(Vec(issueWidth, Bool()))
//-------------------------------------------------------------
// hook up inputs
for (w <- 0 until issueWidth) {
val rrd_decode_unit = Module(new RegisterReadDecode(supportedUnitsArray(w)))
rrd_decode_unit.io.iss_valid := io.iss_valids(w)
rrd_decode_unit.io.iss_uop := io.iss_uops(w)
rrd_valids(w) := RegNext(rrd_decode_unit.io.rrd_valid &&
!IsKilledByBranch(io.brupdate, rrd_decode_unit.io.rrd_uop))
rrd_uops(w) := RegNext(GetNewUopAndBrMask(rrd_decode_unit.io.rrd_uop, io.brupdate))
}
//-------------------------------------------------------------
// read ports
require (numTotalReadPorts == numReadPortsArray.reduce(_+_))
val rrd_rs1_data = Wire(Vec(issueWidth, Bits(registerWidth.W)))
val rrd_rs2_data = Wire(Vec(issueWidth, Bits(registerWidth.W)))
val rrd_rs3_data = Wire(Vec(issueWidth, Bits(registerWidth.W)))
val rrd_pred_data = Wire(Vec(issueWidth, Bool()))
rrd_rs1_data := DontCare
rrd_rs2_data := DontCare
rrd_rs3_data := DontCare
rrd_pred_data := DontCare
io.prf_read_ports := DontCare
var idx = 0 // index into flattened read_ports array
for (w <- 0 until issueWidth) {
val numReadPorts = numReadPortsArray(w)
// NOTE:
// rrdLatency==1, we need to send read address at end of ISS stage,
// in order to get read data back at end of RRD stage.
val rs1_addr = io.iss_uops(w).prs1
val rs2_addr = io.iss_uops(w).prs2
val rs3_addr = io.iss_uops(w).prs3
val pred_addr = io.iss_uops(w).ppred
if (numReadPorts > 0) io.rf_read_ports(idx+0).addr := rs1_addr
if (numReadPorts > 1) io.rf_read_ports(idx+1).addr := rs2_addr
if (numReadPorts > 2) io.rf_read_ports(idx+2).addr := rs3_addr
if (enableSFBOpt) io.prf_read_ports(w).addr := pred_addr
if (numReadPorts > 0) rrd_rs1_data(w) := Mux(RegNext(rs1_addr === 0.U), 0.U, io.rf_read_ports(idx+0).data)
if (numReadPorts > 1) rrd_rs2_data(w) := Mux(RegNext(rs2_addr === 0.U), 0.U, io.rf_read_ports(idx+1).data)
if (numReadPorts > 2) rrd_rs3_data(w) := Mux(RegNext(rs3_addr === 0.U), 0.U, io.rf_read_ports(idx+2).data)
if (enableSFBOpt) rrd_pred_data(w) := Mux(RegNext(io.iss_uops(w).is_sfb_shadow), io.prf_read_ports(w).data, false.B)
val rrd_kill = io.kill || IsKilledByBranch(io.brupdate, rrd_uops(w))
exe_reg_valids(w) := Mux(rrd_kill, false.B, rrd_valids(w))
// TODO use only the valids signal, don't require us to set nullUop
exe_reg_uops(w) := Mux(rrd_kill, NullMicroOp, rrd_uops(w))
exe_reg_uops(w).br_mask := GetNewBrMask(io.brupdate, rrd_uops(w))
idx += numReadPorts
}
//-------------------------------------------------------------
//-------------------------------------------------------------
// BYPASS MUXES -----------------------------------------------
// performed at the end of the register read stage
// NOTES: this code is fairly hard-coded. Sorry.
// ASSUMPTIONS:
// - rs3 is used for FPU ops which are NOT bypassed (so don't check
// them!).
// - only bypass integer registers.
val bypassed_rs1_data = Wire(Vec(issueWidth, Bits(registerWidth.W)))
val bypassed_rs2_data = Wire(Vec(issueWidth, Bits(registerWidth.W)))
val bypassed_pred_data = Wire(Vec(issueWidth, Bool()))
bypassed_pred_data := DontCare
for (w <- 0 until issueWidth) {
val numReadPorts = numReadPortsArray(w)
var rs1_cases = Array((false.B, 0.U(registerWidth.W)))
var rs2_cases = Array((false.B, 0.U(registerWidth.W)))
var pred_cases = Array((false.B, 0.U(1.W)))
val prs1 = rrd_uops(w).prs1
val lrs1_rtype = rrd_uops(w).lrs1_rtype
val prs2 = rrd_uops(w).prs2
val lrs2_rtype = rrd_uops(w).lrs2_rtype
val ppred = rrd_uops(w).ppred
for (b <- 0 until numTotalBypassPorts)
{
val bypass = io.bypass(b)
// can't use "io.bypass.valid(b) since it would create a combinational loop on branch kills"
rs1_cases ++= Array((bypass.valid && (prs1 === bypass.bits.uop.pdst) && bypass.bits.uop.rf_wen
&& bypass.bits.uop.dst_rtype === RT_FIX && lrs1_rtype === RT_FIX && (prs1 =/= 0.U), bypass.bits.data))
rs2_cases ++= Array((bypass.valid && (prs2 === bypass.bits.uop.pdst) && bypass.bits.uop.rf_wen
&& bypass.bits.uop.dst_rtype === RT_FIX && lrs2_rtype === RT_FIX && (prs2 =/= 0.U), bypass.bits.data))
}
for (b <- 0 until numTotalPredBypassPorts)
{
val bypass = io.pred_bypass(b)
pred_cases ++= Array((bypass.valid && (ppred === bypass.bits.uop.pdst) && bypass.bits.uop.is_sfb_br, bypass.bits.data))
}
if (numReadPorts > 0) bypassed_rs1_data(w) := MuxCase(rrd_rs1_data(w), rs1_cases)
if (numReadPorts > 1) bypassed_rs2_data(w) := MuxCase(rrd_rs2_data(w), rs2_cases)
if (enableSFBOpt) bypassed_pred_data(w) := MuxCase(rrd_pred_data(w), pred_cases)
}
//-------------------------------------------------------------
//-------------------------------------------------------------
// **** Execute Stage ****
//-------------------------------------------------------------
//-------------------------------------------------------------
for (w <- 0 until issueWidth) {
val numReadPorts = numReadPortsArray(w)
if (numReadPorts > 0) exe_reg_rs1_data(w) := bypassed_rs1_data(w)
if (numReadPorts > 1) exe_reg_rs2_data(w) := bypassed_rs2_data(w)
if (numReadPorts > 2) exe_reg_rs3_data(w) := rrd_rs3_data(w)
if (enableSFBOpt) exe_reg_pred_data(w) := bypassed_pred_data(w)
// ASSUMPTION: rs3 is FPU which is NOT bypassed
}
// TODO add assert to detect bypass conflicts on non-bypassable things
// TODO add assert that checks bypassing to verify there isn't something it hits rs3
//-------------------------------------------------------------
// set outputs to execute pipelines
for (w <- 0 until issueWidth) {
val numReadPorts = numReadPortsArray(w)
io.exe_reqs(w).valid := exe_reg_valids(w)
io.exe_reqs(w).bits := DontCare
io.exe_reqs(w).bits.uop := exe_reg_uops(w)
if (numReadPorts > 0) io.exe_reqs(w).bits.rs1_data := exe_reg_rs1_data(w)
if (numReadPorts > 1) io.exe_reqs(w).bits.rs2_data := exe_reg_rs2_data(w)
if (numReadPorts > 2) io.exe_reqs(w).bits.rs3_data := exe_reg_rs3_data(w)
if (enableSFBOpt) io.exe_reqs(w).bits.pred_data := exe_reg_pred_data(w)
}
} | module RegisterRead(
input clock,
input reset,
input io_iss_valids_0,
input [6:0] io_iss_uops_0_uopc,
input [31:0] io_iss_uops_0_inst,
input [31:0] io_iss_uops_0_debug_inst,
input io_iss_uops_0_is_rvc,
input [39:0] io_iss_uops_0_debug_pc,
input [2:0] io_iss_uops_0_iq_type,
input [9:0] io_iss_uops_0_fu_code,
input [1:0] io_iss_uops_0_iw_state,
input io_iss_uops_0_is_br,
input io_iss_uops_0_is_jalr,
input io_iss_uops_0_is_jal,
input io_iss_uops_0_is_sfb,
input [7:0] io_iss_uops_0_br_mask,
input [2:0] io_iss_uops_0_br_tag,
input [3:0] io_iss_uops_0_ftq_idx,
input io_iss_uops_0_edge_inst,
input [5:0] io_iss_uops_0_pc_lob,
input io_iss_uops_0_taken,
input [19:0] io_iss_uops_0_imm_packed,
input [11:0] io_iss_uops_0_csr_addr,
input [4:0] io_iss_uops_0_rob_idx,
input [2:0] io_iss_uops_0_ldq_idx,
input [2:0] io_iss_uops_0_stq_idx,
input [1:0] io_iss_uops_0_rxq_idx,
input [5:0] io_iss_uops_0_pdst,
input [5:0] io_iss_uops_0_prs1,
input [5:0] io_iss_uops_0_prs2,
input [5:0] io_iss_uops_0_prs3,
input [3:0] io_iss_uops_0_ppred,
input io_iss_uops_0_prs1_busy,
input io_iss_uops_0_prs2_busy,
input io_iss_uops_0_prs3_busy,
input io_iss_uops_0_ppred_busy,
input [5:0] io_iss_uops_0_stale_pdst,
input io_iss_uops_0_exception,
input [63:0] io_iss_uops_0_exc_cause,
input io_iss_uops_0_bypassable,
input [4:0] io_iss_uops_0_mem_cmd,
input [1:0] io_iss_uops_0_mem_size,
input io_iss_uops_0_mem_signed,
input io_iss_uops_0_is_fence,
input io_iss_uops_0_is_fencei,
input io_iss_uops_0_is_amo,
input io_iss_uops_0_uses_ldq,
input io_iss_uops_0_uses_stq,
input io_iss_uops_0_is_sys_pc2epc,
input io_iss_uops_0_is_unique,
input io_iss_uops_0_flush_on_commit,
input io_iss_uops_0_ldst_is_rs1,
input [5:0] io_iss_uops_0_ldst,
input [5:0] io_iss_uops_0_lrs1,
input [5:0] io_iss_uops_0_lrs2,
input [5:0] io_iss_uops_0_lrs3,
input io_iss_uops_0_ldst_val,
input [1:0] io_iss_uops_0_dst_rtype,
input [1:0] io_iss_uops_0_lrs1_rtype,
input [1:0] io_iss_uops_0_lrs2_rtype,
input io_iss_uops_0_frs3_en,
input io_iss_uops_0_fp_val,
input io_iss_uops_0_fp_single,
input io_iss_uops_0_xcpt_pf_if,
input io_iss_uops_0_xcpt_ae_if,
input io_iss_uops_0_xcpt_ma_if,
input io_iss_uops_0_bp_debug_if,
input io_iss_uops_0_bp_xcpt_if,
input [1:0] io_iss_uops_0_debug_fsrc,
input [1:0] io_iss_uops_0_debug_tsrc,
output [5:0] io_rf_read_ports_0_addr,
input [64:0] io_rf_read_ports_0_data,
output [5:0] io_rf_read_ports_1_addr,
input [64:0] io_rf_read_ports_1_data,
output [5:0] io_rf_read_ports_2_addr,
input [64:0] io_rf_read_ports_2_data,
output io_exe_reqs_0_valid,
output [6:0] io_exe_reqs_0_bits_uop_uopc,
output [31:0] io_exe_reqs_0_bits_uop_inst,
output [31:0] io_exe_reqs_0_bits_uop_debug_inst,
output io_exe_reqs_0_bits_uop_is_rvc,
output [39:0] io_exe_reqs_0_bits_uop_debug_pc,
output [2:0] io_exe_reqs_0_bits_uop_iq_type,
output [9:0] io_exe_reqs_0_bits_uop_fu_code,
output [3:0] io_exe_reqs_0_bits_uop_ctrl_br_type,
output [1:0] io_exe_reqs_0_bits_uop_ctrl_op1_sel,
output [2:0] io_exe_reqs_0_bits_uop_ctrl_op2_sel,
output [2:0] io_exe_reqs_0_bits_uop_ctrl_imm_sel,
output [4:0] io_exe_reqs_0_bits_uop_ctrl_op_fcn,
output io_exe_reqs_0_bits_uop_ctrl_fcn_dw,
output [2:0] io_exe_reqs_0_bits_uop_ctrl_csr_cmd,
output io_exe_reqs_0_bits_uop_ctrl_is_load,
output io_exe_reqs_0_bits_uop_ctrl_is_sta,
output io_exe_reqs_0_bits_uop_ctrl_is_std,
output [1:0] io_exe_reqs_0_bits_uop_iw_state,
output io_exe_reqs_0_bits_uop_iw_p1_poisoned,
output io_exe_reqs_0_bits_uop_iw_p2_poisoned,
output io_exe_reqs_0_bits_uop_is_br,
output io_exe_reqs_0_bits_uop_is_jalr,
output io_exe_reqs_0_bits_uop_is_jal,
output io_exe_reqs_0_bits_uop_is_sfb,
output [7:0] io_exe_reqs_0_bits_uop_br_mask,
output [2:0] io_exe_reqs_0_bits_uop_br_tag,
output [3:0] io_exe_reqs_0_bits_uop_ftq_idx,
output io_exe_reqs_0_bits_uop_edge_inst,
output [5:0] io_exe_reqs_0_bits_uop_pc_lob,
output io_exe_reqs_0_bits_uop_taken,
output [19:0] io_exe_reqs_0_bits_uop_imm_packed,
output [11:0] io_exe_reqs_0_bits_uop_csr_addr,
output [4:0] io_exe_reqs_0_bits_uop_rob_idx,
output [2:0] io_exe_reqs_0_bits_uop_ldq_idx,
output [2:0] io_exe_reqs_0_bits_uop_stq_idx,
output [1:0] io_exe_reqs_0_bits_uop_rxq_idx,
output [5:0] io_exe_reqs_0_bits_uop_pdst,
output [5:0] io_exe_reqs_0_bits_uop_prs1,
output [5:0] io_exe_reqs_0_bits_uop_prs2,
output [5:0] io_exe_reqs_0_bits_uop_prs3,
output [3:0] io_exe_reqs_0_bits_uop_ppred,
output io_exe_reqs_0_bits_uop_prs1_busy,
output io_exe_reqs_0_bits_uop_prs2_busy,
output io_exe_reqs_0_bits_uop_prs3_busy,
output io_exe_reqs_0_bits_uop_ppred_busy,
output [5:0] io_exe_reqs_0_bits_uop_stale_pdst,
output io_exe_reqs_0_bits_uop_exception,
output [63:0] io_exe_reqs_0_bits_uop_exc_cause,
output io_exe_reqs_0_bits_uop_bypassable,
output [4:0] io_exe_reqs_0_bits_uop_mem_cmd,
output [1:0] io_exe_reqs_0_bits_uop_mem_size,
output io_exe_reqs_0_bits_uop_mem_signed,
output io_exe_reqs_0_bits_uop_is_fence,
output io_exe_reqs_0_bits_uop_is_fencei,
output io_exe_reqs_0_bits_uop_is_amo,
output io_exe_reqs_0_bits_uop_uses_ldq,
output io_exe_reqs_0_bits_uop_uses_stq,
output io_exe_reqs_0_bits_uop_is_sys_pc2epc,
output io_exe_reqs_0_bits_uop_is_unique,
output io_exe_reqs_0_bits_uop_flush_on_commit,
output io_exe_reqs_0_bits_uop_ldst_is_rs1,
output [5:0] io_exe_reqs_0_bits_uop_ldst,
output [5:0] io_exe_reqs_0_bits_uop_lrs1,
output [5:0] io_exe_reqs_0_bits_uop_lrs2,
output [5:0] io_exe_reqs_0_bits_uop_lrs3,
output io_exe_reqs_0_bits_uop_ldst_val,
output [1:0] io_exe_reqs_0_bits_uop_dst_rtype,
output [1:0] io_exe_reqs_0_bits_uop_lrs1_rtype,
output [1:0] io_exe_reqs_0_bits_uop_lrs2_rtype,
output io_exe_reqs_0_bits_uop_frs3_en,
output io_exe_reqs_0_bits_uop_fp_val,
output io_exe_reqs_0_bits_uop_fp_single,
output io_exe_reqs_0_bits_uop_xcpt_pf_if,
output io_exe_reqs_0_bits_uop_xcpt_ae_if,
output io_exe_reqs_0_bits_uop_xcpt_ma_if,
output io_exe_reqs_0_bits_uop_bp_debug_if,
output io_exe_reqs_0_bits_uop_bp_xcpt_if,
output [1:0] io_exe_reqs_0_bits_uop_debug_fsrc,
output [1:0] io_exe_reqs_0_bits_uop_debug_tsrc,
output [64:0] io_exe_reqs_0_bits_rs1_data,
output [64:0] io_exe_reqs_0_bits_rs2_data,
output [64:0] io_exe_reqs_0_bits_rs3_data,
input io_kill,
input [7:0] io_brupdate_b1_resolve_mask,
input [7:0] io_brupdate_b1_mispredict_mask
);
wire _rrd_decode_unit_io_rrd_valid;
wire [6:0] _rrd_decode_unit_io_rrd_uop_uopc;
wire [31:0] _rrd_decode_unit_io_rrd_uop_inst;
wire [31:0] _rrd_decode_unit_io_rrd_uop_debug_inst;
wire _rrd_decode_unit_io_rrd_uop_is_rvc;
wire [39:0] _rrd_decode_unit_io_rrd_uop_debug_pc;
wire [2:0] _rrd_decode_unit_io_rrd_uop_iq_type;
wire [9:0] _rrd_decode_unit_io_rrd_uop_fu_code;
wire [3:0] _rrd_decode_unit_io_rrd_uop_ctrl_br_type;
wire [1:0] _rrd_decode_unit_io_rrd_uop_ctrl_op1_sel;
wire [2:0] _rrd_decode_unit_io_rrd_uop_ctrl_op2_sel;
wire [2:0] _rrd_decode_unit_io_rrd_uop_ctrl_imm_sel;
wire [4:0] _rrd_decode_unit_io_rrd_uop_ctrl_op_fcn;
wire _rrd_decode_unit_io_rrd_uop_ctrl_fcn_dw;
wire [2:0] _rrd_decode_unit_io_rrd_uop_ctrl_csr_cmd;
wire _rrd_decode_unit_io_rrd_uop_ctrl_is_load;
wire _rrd_decode_unit_io_rrd_uop_ctrl_is_sta;
wire _rrd_decode_unit_io_rrd_uop_ctrl_is_std;
wire [1:0] _rrd_decode_unit_io_rrd_uop_iw_state;
wire _rrd_decode_unit_io_rrd_uop_is_br;
wire _rrd_decode_unit_io_rrd_uop_is_jalr;
wire _rrd_decode_unit_io_rrd_uop_is_jal;
wire _rrd_decode_unit_io_rrd_uop_is_sfb;
wire [7:0] _rrd_decode_unit_io_rrd_uop_br_mask;
wire [2:0] _rrd_decode_unit_io_rrd_uop_br_tag;
wire [3:0] _rrd_decode_unit_io_rrd_uop_ftq_idx;
wire _rrd_decode_unit_io_rrd_uop_edge_inst;
wire [5:0] _rrd_decode_unit_io_rrd_uop_pc_lob;
wire _rrd_decode_unit_io_rrd_uop_taken;
wire [19:0] _rrd_decode_unit_io_rrd_uop_imm_packed;
wire [11:0] _rrd_decode_unit_io_rrd_uop_csr_addr;
wire [4:0] _rrd_decode_unit_io_rrd_uop_rob_idx;
wire [2:0] _rrd_decode_unit_io_rrd_uop_ldq_idx;
wire [2:0] _rrd_decode_unit_io_rrd_uop_stq_idx;
wire [1:0] _rrd_decode_unit_io_rrd_uop_rxq_idx;
wire [5:0] _rrd_decode_unit_io_rrd_uop_pdst;
wire [5:0] _rrd_decode_unit_io_rrd_uop_prs1;
wire [5:0] _rrd_decode_unit_io_rrd_uop_prs2;
wire [5:0] _rrd_decode_unit_io_rrd_uop_prs3;
wire [3:0] _rrd_decode_unit_io_rrd_uop_ppred;
wire _rrd_decode_unit_io_rrd_uop_prs1_busy;
wire _rrd_decode_unit_io_rrd_uop_prs2_busy;
wire _rrd_decode_unit_io_rrd_uop_prs3_busy;
wire _rrd_decode_unit_io_rrd_uop_ppred_busy;
wire [5:0] _rrd_decode_unit_io_rrd_uop_stale_pdst;
wire _rrd_decode_unit_io_rrd_uop_exception;
wire [63:0] _rrd_decode_unit_io_rrd_uop_exc_cause;
wire _rrd_decode_unit_io_rrd_uop_bypassable;
wire [4:0] _rrd_decode_unit_io_rrd_uop_mem_cmd;
wire [1:0] _rrd_decode_unit_io_rrd_uop_mem_size;
wire _rrd_decode_unit_io_rrd_uop_mem_signed;
wire _rrd_decode_unit_io_rrd_uop_is_fence;
wire _rrd_decode_unit_io_rrd_uop_is_fencei;
wire _rrd_decode_unit_io_rrd_uop_is_amo;
wire _rrd_decode_unit_io_rrd_uop_uses_ldq;
wire _rrd_decode_unit_io_rrd_uop_uses_stq;
wire _rrd_decode_unit_io_rrd_uop_is_sys_pc2epc;
wire _rrd_decode_unit_io_rrd_uop_is_unique;
wire _rrd_decode_unit_io_rrd_uop_flush_on_commit;
wire _rrd_decode_unit_io_rrd_uop_ldst_is_rs1;
wire [5:0] _rrd_decode_unit_io_rrd_uop_ldst;
wire [5:0] _rrd_decode_unit_io_rrd_uop_lrs1;
wire [5:0] _rrd_decode_unit_io_rrd_uop_lrs2;
wire [5:0] _rrd_decode_unit_io_rrd_uop_lrs3;
wire _rrd_decode_unit_io_rrd_uop_ldst_val;
wire [1:0] _rrd_decode_unit_io_rrd_uop_dst_rtype;
wire [1:0] _rrd_decode_unit_io_rrd_uop_lrs1_rtype;
wire [1:0] _rrd_decode_unit_io_rrd_uop_lrs2_rtype;
wire _rrd_decode_unit_io_rrd_uop_frs3_en;
wire _rrd_decode_unit_io_rrd_uop_fp_val;
wire _rrd_decode_unit_io_rrd_uop_fp_single;
wire _rrd_decode_unit_io_rrd_uop_xcpt_pf_if;
wire _rrd_decode_unit_io_rrd_uop_xcpt_ae_if;
wire _rrd_decode_unit_io_rrd_uop_xcpt_ma_if;
wire _rrd_decode_unit_io_rrd_uop_bp_debug_if;
wire _rrd_decode_unit_io_rrd_uop_bp_xcpt_if;
wire [1:0] _rrd_decode_unit_io_rrd_uop_debug_fsrc;
wire [1:0] _rrd_decode_unit_io_rrd_uop_debug_tsrc;
reg exe_reg_valids_0;
reg [6:0] exe_reg_uops_0_uopc;
reg [31:0] exe_reg_uops_0_inst;
reg [31:0] exe_reg_uops_0_debug_inst;
reg exe_reg_uops_0_is_rvc;
reg [39:0] exe_reg_uops_0_debug_pc;
reg [2:0] exe_reg_uops_0_iq_type;
reg [9:0] exe_reg_uops_0_fu_code;
reg [3:0] exe_reg_uops_0_ctrl_br_type;
reg [1:0] exe_reg_uops_0_ctrl_op1_sel;
reg [2:0] exe_reg_uops_0_ctrl_op2_sel;
reg [2:0] exe_reg_uops_0_ctrl_imm_sel;
reg [4:0] exe_reg_uops_0_ctrl_op_fcn;
reg exe_reg_uops_0_ctrl_fcn_dw;
reg [2:0] exe_reg_uops_0_ctrl_csr_cmd;
reg exe_reg_uops_0_ctrl_is_load;
reg exe_reg_uops_0_ctrl_is_sta;
reg exe_reg_uops_0_ctrl_is_std;
reg [1:0] exe_reg_uops_0_iw_state;
reg exe_reg_uops_0_iw_p1_poisoned;
reg exe_reg_uops_0_iw_p2_poisoned;
reg exe_reg_uops_0_is_br;
reg exe_reg_uops_0_is_jalr;
reg exe_reg_uops_0_is_jal;
reg exe_reg_uops_0_is_sfb;
reg [7:0] exe_reg_uops_0_br_mask;
reg [2:0] exe_reg_uops_0_br_tag;
reg [3:0] exe_reg_uops_0_ftq_idx;
reg exe_reg_uops_0_edge_inst;
reg [5:0] exe_reg_uops_0_pc_lob;
reg exe_reg_uops_0_taken;
reg [19:0] exe_reg_uops_0_imm_packed;
reg [11:0] exe_reg_uops_0_csr_addr;
reg [4:0] exe_reg_uops_0_rob_idx;
reg [2:0] exe_reg_uops_0_ldq_idx;
reg [2:0] exe_reg_uops_0_stq_idx;
reg [1:0] exe_reg_uops_0_rxq_idx;
reg [5:0] exe_reg_uops_0_pdst;
reg [5:0] exe_reg_uops_0_prs1;
reg [5:0] exe_reg_uops_0_prs2;
reg [5:0] exe_reg_uops_0_prs3;
reg [3:0] exe_reg_uops_0_ppred;
reg exe_reg_uops_0_prs1_busy;
reg exe_reg_uops_0_prs2_busy;
reg exe_reg_uops_0_prs3_busy;
reg exe_reg_uops_0_ppred_busy;
reg [5:0] exe_reg_uops_0_stale_pdst;
reg exe_reg_uops_0_exception;
reg [63:0] exe_reg_uops_0_exc_cause;
reg exe_reg_uops_0_bypassable;
reg [4:0] exe_reg_uops_0_mem_cmd;
reg [1:0] exe_reg_uops_0_mem_size;
reg exe_reg_uops_0_mem_signed;
reg exe_reg_uops_0_is_fence;
reg exe_reg_uops_0_is_fencei;
reg exe_reg_uops_0_is_amo;
reg exe_reg_uops_0_uses_ldq;
reg exe_reg_uops_0_uses_stq;
reg exe_reg_uops_0_is_sys_pc2epc;
reg exe_reg_uops_0_is_unique;
reg exe_reg_uops_0_flush_on_commit;
reg exe_reg_uops_0_ldst_is_rs1;
reg [5:0] exe_reg_uops_0_ldst;
reg [5:0] exe_reg_uops_0_lrs1;
reg [5:0] exe_reg_uops_0_lrs2;
reg [5:0] exe_reg_uops_0_lrs3;
reg exe_reg_uops_0_ldst_val;
reg [1:0] exe_reg_uops_0_dst_rtype;
reg [1:0] exe_reg_uops_0_lrs1_rtype;
reg [1:0] exe_reg_uops_0_lrs2_rtype;
reg exe_reg_uops_0_frs3_en;
reg exe_reg_uops_0_fp_val;
reg exe_reg_uops_0_fp_single;
reg exe_reg_uops_0_xcpt_pf_if;
reg exe_reg_uops_0_xcpt_ae_if;
reg exe_reg_uops_0_xcpt_ma_if;
reg exe_reg_uops_0_bp_debug_if;
reg exe_reg_uops_0_bp_xcpt_if;
reg [1:0] exe_reg_uops_0_debug_fsrc;
reg [1:0] exe_reg_uops_0_debug_tsrc;
reg [64:0] exe_reg_rs1_data_0;
reg [64:0] exe_reg_rs2_data_0;
reg [64:0] exe_reg_rs3_data_0;
reg rrd_valids_0_REG;
reg [6:0] rrd_uops_0_REG_uopc;
reg [31:0] rrd_uops_0_REG_inst;
reg [31:0] rrd_uops_0_REG_debug_inst;
reg rrd_uops_0_REG_is_rvc;
reg [39:0] rrd_uops_0_REG_debug_pc;
reg [2:0] rrd_uops_0_REG_iq_type;
reg [9:0] rrd_uops_0_REG_fu_code;
reg [3:0] rrd_uops_0_REG_ctrl_br_type;
reg [1:0] rrd_uops_0_REG_ctrl_op1_sel;
reg [2:0] rrd_uops_0_REG_ctrl_op2_sel;
reg [2:0] rrd_uops_0_REG_ctrl_imm_sel;
reg [4:0] rrd_uops_0_REG_ctrl_op_fcn;
reg rrd_uops_0_REG_ctrl_fcn_dw;
reg [2:0] rrd_uops_0_REG_ctrl_csr_cmd;
reg rrd_uops_0_REG_ctrl_is_load;
reg rrd_uops_0_REG_ctrl_is_sta;
reg rrd_uops_0_REG_ctrl_is_std;
reg [1:0] rrd_uops_0_REG_iw_state;
reg rrd_uops_0_REG_iw_p1_poisoned;
reg rrd_uops_0_REG_iw_p2_poisoned;
reg rrd_uops_0_REG_is_br;
reg rrd_uops_0_REG_is_jalr;
reg rrd_uops_0_REG_is_jal;
reg rrd_uops_0_REG_is_sfb;
reg [7:0] rrd_uops_0_REG_br_mask;
reg [2:0] rrd_uops_0_REG_br_tag;
reg [3:0] rrd_uops_0_REG_ftq_idx;
reg rrd_uops_0_REG_edge_inst;
reg [5:0] rrd_uops_0_REG_pc_lob;
reg rrd_uops_0_REG_taken;
reg [19:0] rrd_uops_0_REG_imm_packed;
reg [11:0] rrd_uops_0_REG_csr_addr;
reg [4:0] rrd_uops_0_REG_rob_idx;
reg [2:0] rrd_uops_0_REG_ldq_idx;
reg [2:0] rrd_uops_0_REG_stq_idx;
reg [1:0] rrd_uops_0_REG_rxq_idx;
reg [5:0] rrd_uops_0_REG_pdst;
reg [5:0] rrd_uops_0_REG_prs1;
reg [5:0] rrd_uops_0_REG_prs2;
reg [5:0] rrd_uops_0_REG_prs3;
reg [3:0] rrd_uops_0_REG_ppred;
reg rrd_uops_0_REG_prs1_busy;
reg rrd_uops_0_REG_prs2_busy;
reg rrd_uops_0_REG_prs3_busy;
reg rrd_uops_0_REG_ppred_busy;
reg [5:0] rrd_uops_0_REG_stale_pdst;
reg rrd_uops_0_REG_exception;
reg [63:0] rrd_uops_0_REG_exc_cause;
reg rrd_uops_0_REG_bypassable;
reg [4:0] rrd_uops_0_REG_mem_cmd;
reg [1:0] rrd_uops_0_REG_mem_size;
reg rrd_uops_0_REG_mem_signed;
reg rrd_uops_0_REG_is_fence;
reg rrd_uops_0_REG_is_fencei;
reg rrd_uops_0_REG_is_amo;
reg rrd_uops_0_REG_uses_ldq;
reg rrd_uops_0_REG_uses_stq;
reg rrd_uops_0_REG_is_sys_pc2epc;
reg rrd_uops_0_REG_is_unique;
reg rrd_uops_0_REG_flush_on_commit;
reg rrd_uops_0_REG_ldst_is_rs1;
reg [5:0] rrd_uops_0_REG_ldst;
reg [5:0] rrd_uops_0_REG_lrs1;
reg [5:0] rrd_uops_0_REG_lrs2;
reg [5:0] rrd_uops_0_REG_lrs3;
reg rrd_uops_0_REG_ldst_val;
reg [1:0] rrd_uops_0_REG_dst_rtype;
reg [1:0] rrd_uops_0_REG_lrs1_rtype;
reg [1:0] rrd_uops_0_REG_lrs2_rtype;
reg rrd_uops_0_REG_frs3_en;
reg rrd_uops_0_REG_fp_val;
reg rrd_uops_0_REG_fp_single;
reg rrd_uops_0_REG_xcpt_pf_if;
reg rrd_uops_0_REG_xcpt_ae_if;
reg rrd_uops_0_REG_xcpt_ma_if;
reg rrd_uops_0_REG_bp_debug_if;
reg rrd_uops_0_REG_bp_xcpt_if;
reg [1:0] rrd_uops_0_REG_debug_fsrc;
reg [1:0] rrd_uops_0_REG_debug_tsrc;
reg rrd_rs1_data_0_REG;
reg rrd_rs2_data_0_REG;
reg rrd_rs3_data_0_REG;
wire [8:0] _GEN = {io_kill, io_brupdate_b1_mispredict_mask & rrd_uops_0_REG_br_mask};
always @(posedge clock) begin
if (reset)
exe_reg_valids_0 <= 1'h0;
else
exe_reg_valids_0 <= ~(|_GEN) & rrd_valids_0_REG;
exe_reg_uops_0_uopc <= (|_GEN) ? 7'h0 : rrd_uops_0_REG_uopc;
exe_reg_uops_0_inst <= (|_GEN) ? 32'h0 : rrd_uops_0_REG_inst;
exe_reg_uops_0_debug_inst <= (|_GEN) ? 32'h0 : rrd_uops_0_REG_debug_inst;
exe_reg_uops_0_is_rvc <= ~(|_GEN) & rrd_uops_0_REG_is_rvc;
exe_reg_uops_0_debug_pc <= (|_GEN) ? 40'h0 : rrd_uops_0_REG_debug_pc;
exe_reg_uops_0_iq_type <= (|_GEN) ? 3'h0 : rrd_uops_0_REG_iq_type;
exe_reg_uops_0_fu_code <= (|_GEN) ? 10'h0 : rrd_uops_0_REG_fu_code;
exe_reg_uops_0_ctrl_br_type <= (|_GEN) ? 4'h0 : rrd_uops_0_REG_ctrl_br_type;
exe_reg_uops_0_ctrl_op1_sel <= (|_GEN) ? 2'h0 : rrd_uops_0_REG_ctrl_op1_sel;
exe_reg_uops_0_ctrl_op2_sel <= (|_GEN) ? 3'h0 : rrd_uops_0_REG_ctrl_op2_sel;
exe_reg_uops_0_ctrl_imm_sel <= (|_GEN) ? 3'h0 : rrd_uops_0_REG_ctrl_imm_sel;
exe_reg_uops_0_ctrl_op_fcn <= (|_GEN) ? 5'h0 : rrd_uops_0_REG_ctrl_op_fcn;
exe_reg_uops_0_ctrl_fcn_dw <= ~(|_GEN) & rrd_uops_0_REG_ctrl_fcn_dw;
exe_reg_uops_0_ctrl_csr_cmd <= (|_GEN) ? 3'h0 : rrd_uops_0_REG_ctrl_csr_cmd;
exe_reg_uops_0_ctrl_is_load <= ~(|_GEN) & rrd_uops_0_REG_ctrl_is_load;
exe_reg_uops_0_ctrl_is_sta <= ~(|_GEN) & rrd_uops_0_REG_ctrl_is_sta;
exe_reg_uops_0_ctrl_is_std <= ~(|_GEN) & rrd_uops_0_REG_ctrl_is_std;
exe_reg_uops_0_iw_state <= (|_GEN) ? 2'h0 : rrd_uops_0_REG_iw_state;
exe_reg_uops_0_iw_p1_poisoned <= ~(|_GEN) & rrd_uops_0_REG_iw_p1_poisoned;
exe_reg_uops_0_iw_p2_poisoned <= ~(|_GEN) & rrd_uops_0_REG_iw_p2_poisoned;
exe_reg_uops_0_is_br <= ~(|_GEN) & rrd_uops_0_REG_is_br;
exe_reg_uops_0_is_jalr <= ~(|_GEN) & rrd_uops_0_REG_is_jalr;
exe_reg_uops_0_is_jal <= ~(|_GEN) & rrd_uops_0_REG_is_jal;
exe_reg_uops_0_is_sfb <= ~(|_GEN) & rrd_uops_0_REG_is_sfb;
exe_reg_uops_0_br_mask <= rrd_uops_0_REG_br_mask & ~io_brupdate_b1_resolve_mask;
exe_reg_uops_0_br_tag <= (|_GEN) ? 3'h0 : rrd_uops_0_REG_br_tag;
exe_reg_uops_0_ftq_idx <= (|_GEN) ? 4'h0 : rrd_uops_0_REG_ftq_idx;
exe_reg_uops_0_edge_inst <= ~(|_GEN) & rrd_uops_0_REG_edge_inst;
exe_reg_uops_0_pc_lob <= (|_GEN) ? 6'h0 : rrd_uops_0_REG_pc_lob;
exe_reg_uops_0_taken <= ~(|_GEN) & rrd_uops_0_REG_taken;
exe_reg_uops_0_imm_packed <= (|_GEN) ? 20'h0 : rrd_uops_0_REG_imm_packed;
exe_reg_uops_0_csr_addr <= (|_GEN) ? 12'h0 : rrd_uops_0_REG_csr_addr;
exe_reg_uops_0_rob_idx <= (|_GEN) ? 5'h0 : rrd_uops_0_REG_rob_idx;
exe_reg_uops_0_ldq_idx <= (|_GEN) ? 3'h0 : rrd_uops_0_REG_ldq_idx;
exe_reg_uops_0_stq_idx <= (|_GEN) ? 3'h0 : rrd_uops_0_REG_stq_idx;
exe_reg_uops_0_rxq_idx <= (|_GEN) ? 2'h0 : rrd_uops_0_REG_rxq_idx;
exe_reg_uops_0_pdst <= (|_GEN) ? 6'h0 : rrd_uops_0_REG_pdst;
exe_reg_uops_0_prs1 <= (|_GEN) ? 6'h0 : rrd_uops_0_REG_prs1;
exe_reg_uops_0_prs2 <= (|_GEN) ? 6'h0 : rrd_uops_0_REG_prs2;
exe_reg_uops_0_prs3 <= (|_GEN) ? 6'h0 : rrd_uops_0_REG_prs3;
exe_reg_uops_0_ppred <= (|_GEN) ? 4'h0 : rrd_uops_0_REG_ppred;
exe_reg_uops_0_prs1_busy <= ~(|_GEN) & rrd_uops_0_REG_prs1_busy;
exe_reg_uops_0_prs2_busy <= ~(|_GEN) & rrd_uops_0_REG_prs2_busy;
exe_reg_uops_0_prs3_busy <= ~(|_GEN) & rrd_uops_0_REG_prs3_busy;
exe_reg_uops_0_ppred_busy <= ~(|_GEN) & rrd_uops_0_REG_ppred_busy;
exe_reg_uops_0_stale_pdst <= (|_GEN) ? 6'h0 : rrd_uops_0_REG_stale_pdst;
exe_reg_uops_0_exception <= ~(|_GEN) & rrd_uops_0_REG_exception;
exe_reg_uops_0_exc_cause <= (|_GEN) ? 64'h0 : rrd_uops_0_REG_exc_cause;
exe_reg_uops_0_bypassable <= ~(|_GEN) & rrd_uops_0_REG_bypassable;
exe_reg_uops_0_mem_cmd <= (|_GEN) ? 5'h0 : rrd_uops_0_REG_mem_cmd;
exe_reg_uops_0_mem_size <= (|_GEN) ? 2'h0 : rrd_uops_0_REG_mem_size;
exe_reg_uops_0_mem_signed <= ~(|_GEN) & rrd_uops_0_REG_mem_signed;
exe_reg_uops_0_is_fence <= ~(|_GEN) & rrd_uops_0_REG_is_fence;
exe_reg_uops_0_is_fencei <= ~(|_GEN) & rrd_uops_0_REG_is_fencei;
exe_reg_uops_0_is_amo <= ~(|_GEN) & rrd_uops_0_REG_is_amo;
exe_reg_uops_0_uses_ldq <= ~(|_GEN) & rrd_uops_0_REG_uses_ldq;
exe_reg_uops_0_uses_stq <= ~(|_GEN) & rrd_uops_0_REG_uses_stq;
exe_reg_uops_0_is_sys_pc2epc <= ~(|_GEN) & rrd_uops_0_REG_is_sys_pc2epc;
exe_reg_uops_0_is_unique <= ~(|_GEN) & rrd_uops_0_REG_is_unique;
exe_reg_uops_0_flush_on_commit <= ~(|_GEN) & rrd_uops_0_REG_flush_on_commit;
exe_reg_uops_0_ldst_is_rs1 <= ~(|_GEN) & rrd_uops_0_REG_ldst_is_rs1;
exe_reg_uops_0_ldst <= (|_GEN) ? 6'h0 : rrd_uops_0_REG_ldst;
exe_reg_uops_0_lrs1 <= (|_GEN) ? 6'h0 : rrd_uops_0_REG_lrs1;
exe_reg_uops_0_lrs2 <= (|_GEN) ? 6'h0 : rrd_uops_0_REG_lrs2;
exe_reg_uops_0_lrs3 <= (|_GEN) ? 6'h0 : rrd_uops_0_REG_lrs3;
exe_reg_uops_0_ldst_val <= ~(|_GEN) & rrd_uops_0_REG_ldst_val;
exe_reg_uops_0_dst_rtype <= (|_GEN) ? 2'h2 : rrd_uops_0_REG_dst_rtype;
exe_reg_uops_0_lrs1_rtype <= (|_GEN) ? 2'h0 : rrd_uops_0_REG_lrs1_rtype;
exe_reg_uops_0_lrs2_rtype <= (|_GEN) ? 2'h0 : rrd_uops_0_REG_lrs2_rtype;
exe_reg_uops_0_frs3_en <= ~(|_GEN) & rrd_uops_0_REG_frs3_en;
exe_reg_uops_0_fp_val <= ~(|_GEN) & rrd_uops_0_REG_fp_val;
exe_reg_uops_0_fp_single <= ~(|_GEN) & rrd_uops_0_REG_fp_single;
exe_reg_uops_0_xcpt_pf_if <= ~(|_GEN) & rrd_uops_0_REG_xcpt_pf_if;
exe_reg_uops_0_xcpt_ae_if <= ~(|_GEN) & rrd_uops_0_REG_xcpt_ae_if;
exe_reg_uops_0_xcpt_ma_if <= ~(|_GEN) & rrd_uops_0_REG_xcpt_ma_if;
exe_reg_uops_0_bp_debug_if <= ~(|_GEN) & rrd_uops_0_REG_bp_debug_if;
exe_reg_uops_0_bp_xcpt_if <= ~(|_GEN) & rrd_uops_0_REG_bp_xcpt_if;
exe_reg_uops_0_debug_fsrc <= (|_GEN) ? 2'h0 : rrd_uops_0_REG_debug_fsrc;
exe_reg_uops_0_debug_tsrc <= (|_GEN) ? 2'h0 : rrd_uops_0_REG_debug_tsrc;
exe_reg_rs1_data_0 <= rrd_rs1_data_0_REG ? 65'h0 : io_rf_read_ports_0_data;
exe_reg_rs2_data_0 <= rrd_rs2_data_0_REG ? 65'h0 : io_rf_read_ports_1_data;
exe_reg_rs3_data_0 <= rrd_rs3_data_0_REG ? 65'h0 : io_rf_read_ports_2_data;
rrd_valids_0_REG <= _rrd_decode_unit_io_rrd_valid & (io_brupdate_b1_mispredict_mask & _rrd_decode_unit_io_rrd_uop_br_mask) == 8'h0;
rrd_uops_0_REG_uopc <= _rrd_decode_unit_io_rrd_uop_uopc;
rrd_uops_0_REG_inst <= _rrd_decode_unit_io_rrd_uop_inst;
rrd_uops_0_REG_debug_inst <= _rrd_decode_unit_io_rrd_uop_debug_inst;
rrd_uops_0_REG_is_rvc <= _rrd_decode_unit_io_rrd_uop_is_rvc;
rrd_uops_0_REG_debug_pc <= _rrd_decode_unit_io_rrd_uop_debug_pc;
rrd_uops_0_REG_iq_type <= _rrd_decode_unit_io_rrd_uop_iq_type;
rrd_uops_0_REG_fu_code <= _rrd_decode_unit_io_rrd_uop_fu_code;
rrd_uops_0_REG_ctrl_br_type <= _rrd_decode_unit_io_rrd_uop_ctrl_br_type;
rrd_uops_0_REG_ctrl_op1_sel <= _rrd_decode_unit_io_rrd_uop_ctrl_op1_sel;
rrd_uops_0_REG_ctrl_op2_sel <= _rrd_decode_unit_io_rrd_uop_ctrl_op2_sel;
rrd_uops_0_REG_ctrl_imm_sel <= _rrd_decode_unit_io_rrd_uop_ctrl_imm_sel;
rrd_uops_0_REG_ctrl_op_fcn <= _rrd_decode_unit_io_rrd_uop_ctrl_op_fcn;
rrd_uops_0_REG_ctrl_fcn_dw <= _rrd_decode_unit_io_rrd_uop_ctrl_fcn_dw;
rrd_uops_0_REG_ctrl_csr_cmd <= _rrd_decode_unit_io_rrd_uop_ctrl_csr_cmd;
rrd_uops_0_REG_ctrl_is_load <= _rrd_decode_unit_io_rrd_uop_ctrl_is_load;
rrd_uops_0_REG_ctrl_is_sta <= _rrd_decode_unit_io_rrd_uop_ctrl_is_sta;
rrd_uops_0_REG_ctrl_is_std <= _rrd_decode_unit_io_rrd_uop_ctrl_is_std;
rrd_uops_0_REG_iw_state <= _rrd_decode_unit_io_rrd_uop_iw_state;
rrd_uops_0_REG_iw_p1_poisoned <= 1'h0;
rrd_uops_0_REG_iw_p2_poisoned <= 1'h0;
rrd_uops_0_REG_is_br <= _rrd_decode_unit_io_rrd_uop_is_br;
rrd_uops_0_REG_is_jalr <= _rrd_decode_unit_io_rrd_uop_is_jalr;
rrd_uops_0_REG_is_jal <= _rrd_decode_unit_io_rrd_uop_is_jal;
rrd_uops_0_REG_is_sfb <= _rrd_decode_unit_io_rrd_uop_is_sfb;
rrd_uops_0_REG_br_mask <= _rrd_decode_unit_io_rrd_uop_br_mask & ~io_brupdate_b1_resolve_mask;
rrd_uops_0_REG_br_tag <= _rrd_decode_unit_io_rrd_uop_br_tag;
rrd_uops_0_REG_ftq_idx <= _rrd_decode_unit_io_rrd_uop_ftq_idx;
rrd_uops_0_REG_edge_inst <= _rrd_decode_unit_io_rrd_uop_edge_inst;
rrd_uops_0_REG_pc_lob <= _rrd_decode_unit_io_rrd_uop_pc_lob;
rrd_uops_0_REG_taken <= _rrd_decode_unit_io_rrd_uop_taken;
rrd_uops_0_REG_imm_packed <= _rrd_decode_unit_io_rrd_uop_imm_packed;
rrd_uops_0_REG_csr_addr <= _rrd_decode_unit_io_rrd_uop_csr_addr;
rrd_uops_0_REG_rob_idx <= _rrd_decode_unit_io_rrd_uop_rob_idx;
rrd_uops_0_REG_ldq_idx <= _rrd_decode_unit_io_rrd_uop_ldq_idx;
rrd_uops_0_REG_stq_idx <= _rrd_decode_unit_io_rrd_uop_stq_idx;
rrd_uops_0_REG_rxq_idx <= _rrd_decode_unit_io_rrd_uop_rxq_idx;
rrd_uops_0_REG_pdst <= _rrd_decode_unit_io_rrd_uop_pdst;
rrd_uops_0_REG_prs1 <= _rrd_decode_unit_io_rrd_uop_prs1;
rrd_uops_0_REG_prs2 <= _rrd_decode_unit_io_rrd_uop_prs2;
rrd_uops_0_REG_prs3 <= _rrd_decode_unit_io_rrd_uop_prs3;
rrd_uops_0_REG_ppred <= _rrd_decode_unit_io_rrd_uop_ppred;
rrd_uops_0_REG_prs1_busy <= _rrd_decode_unit_io_rrd_uop_prs1_busy;
rrd_uops_0_REG_prs2_busy <= _rrd_decode_unit_io_rrd_uop_prs2_busy;
rrd_uops_0_REG_prs3_busy <= _rrd_decode_unit_io_rrd_uop_prs3_busy;
rrd_uops_0_REG_ppred_busy <= _rrd_decode_unit_io_rrd_uop_ppred_busy;
rrd_uops_0_REG_stale_pdst <= _rrd_decode_unit_io_rrd_uop_stale_pdst;
rrd_uops_0_REG_exception <= _rrd_decode_unit_io_rrd_uop_exception;
rrd_uops_0_REG_exc_cause <= _rrd_decode_unit_io_rrd_uop_exc_cause;
rrd_uops_0_REG_bypassable <= _rrd_decode_unit_io_rrd_uop_bypassable;
rrd_uops_0_REG_mem_cmd <= _rrd_decode_unit_io_rrd_uop_mem_cmd;
rrd_uops_0_REG_mem_size <= _rrd_decode_unit_io_rrd_uop_mem_size;
rrd_uops_0_REG_mem_signed <= _rrd_decode_unit_io_rrd_uop_mem_signed;
rrd_uops_0_REG_is_fence <= _rrd_decode_unit_io_rrd_uop_is_fence;
rrd_uops_0_REG_is_fencei <= _rrd_decode_unit_io_rrd_uop_is_fencei;
rrd_uops_0_REG_is_amo <= _rrd_decode_unit_io_rrd_uop_is_amo;
rrd_uops_0_REG_uses_ldq <= _rrd_decode_unit_io_rrd_uop_uses_ldq;
rrd_uops_0_REG_uses_stq <= _rrd_decode_unit_io_rrd_uop_uses_stq;
rrd_uops_0_REG_is_sys_pc2epc <= _rrd_decode_unit_io_rrd_uop_is_sys_pc2epc;
rrd_uops_0_REG_is_unique <= _rrd_decode_unit_io_rrd_uop_is_unique;
rrd_uops_0_REG_flush_on_commit <= _rrd_decode_unit_io_rrd_uop_flush_on_commit;
rrd_uops_0_REG_ldst_is_rs1 <= _rrd_decode_unit_io_rrd_uop_ldst_is_rs1;
rrd_uops_0_REG_ldst <= _rrd_decode_unit_io_rrd_uop_ldst;
rrd_uops_0_REG_lrs1 <= _rrd_decode_unit_io_rrd_uop_lrs1;
rrd_uops_0_REG_lrs2 <= _rrd_decode_unit_io_rrd_uop_lrs2;
rrd_uops_0_REG_lrs3 <= _rrd_decode_unit_io_rrd_uop_lrs3;
rrd_uops_0_REG_ldst_val <= _rrd_decode_unit_io_rrd_uop_ldst_val;
rrd_uops_0_REG_dst_rtype <= _rrd_decode_unit_io_rrd_uop_dst_rtype;
rrd_uops_0_REG_lrs1_rtype <= _rrd_decode_unit_io_rrd_uop_lrs1_rtype;
rrd_uops_0_REG_lrs2_rtype <= _rrd_decode_unit_io_rrd_uop_lrs2_rtype;
rrd_uops_0_REG_frs3_en <= _rrd_decode_unit_io_rrd_uop_frs3_en;
rrd_uops_0_REG_fp_val <= _rrd_decode_unit_io_rrd_uop_fp_val;
rrd_uops_0_REG_fp_single <= _rrd_decode_unit_io_rrd_uop_fp_single;
rrd_uops_0_REG_xcpt_pf_if <= _rrd_decode_unit_io_rrd_uop_xcpt_pf_if;
rrd_uops_0_REG_xcpt_ae_if <= _rrd_decode_unit_io_rrd_uop_xcpt_ae_if;
rrd_uops_0_REG_xcpt_ma_if <= _rrd_decode_unit_io_rrd_uop_xcpt_ma_if;
rrd_uops_0_REG_bp_debug_if <= _rrd_decode_unit_io_rrd_uop_bp_debug_if;
rrd_uops_0_REG_bp_xcpt_if <= _rrd_decode_unit_io_rrd_uop_bp_xcpt_if;
rrd_uops_0_REG_debug_fsrc <= _rrd_decode_unit_io_rrd_uop_debug_fsrc;
rrd_uops_0_REG_debug_tsrc <= _rrd_decode_unit_io_rrd_uop_debug_tsrc;
rrd_rs1_data_0_REG <= io_iss_uops_0_prs1 == 6'h0;
rrd_rs2_data_0_REG <= io_iss_uops_0_prs2 == 6'h0;
rrd_rs3_data_0_REG <= io_iss_uops_0_prs3 == 6'h0;
end
RegisterReadDecode rrd_decode_unit (
.io_iss_valid (io_iss_valids_0),
.io_iss_uop_uopc (io_iss_uops_0_uopc),
.io_iss_uop_inst (io_iss_uops_0_inst),
.io_iss_uop_debug_inst (io_iss_uops_0_debug_inst),
.io_iss_uop_is_rvc (io_iss_uops_0_is_rvc),
.io_iss_uop_debug_pc (io_iss_uops_0_debug_pc),
.io_iss_uop_iq_type (io_iss_uops_0_iq_type),
.io_iss_uop_fu_code (io_iss_uops_0_fu_code),
.io_iss_uop_iw_state (io_iss_uops_0_iw_state),
.io_iss_uop_is_br (io_iss_uops_0_is_br),
.io_iss_uop_is_jalr (io_iss_uops_0_is_jalr),
.io_iss_uop_is_jal (io_iss_uops_0_is_jal),
.io_iss_uop_is_sfb (io_iss_uops_0_is_sfb),
.io_iss_uop_br_mask (io_iss_uops_0_br_mask),
.io_iss_uop_br_tag (io_iss_uops_0_br_tag),
.io_iss_uop_ftq_idx (io_iss_uops_0_ftq_idx),
.io_iss_uop_edge_inst (io_iss_uops_0_edge_inst),
.io_iss_uop_pc_lob (io_iss_uops_0_pc_lob),
.io_iss_uop_taken (io_iss_uops_0_taken),
.io_iss_uop_imm_packed (io_iss_uops_0_imm_packed),
.io_iss_uop_csr_addr (io_iss_uops_0_csr_addr),
.io_iss_uop_rob_idx (io_iss_uops_0_rob_idx),
.io_iss_uop_ldq_idx (io_iss_uops_0_ldq_idx),
.io_iss_uop_stq_idx (io_iss_uops_0_stq_idx),
.io_iss_uop_rxq_idx (io_iss_uops_0_rxq_idx),
.io_iss_uop_pdst (io_iss_uops_0_pdst),
.io_iss_uop_prs1 (io_iss_uops_0_prs1),
.io_iss_uop_prs2 (io_iss_uops_0_prs2),
.io_iss_uop_prs3 (io_iss_uops_0_prs3),
.io_iss_uop_ppred (io_iss_uops_0_ppred),
.io_iss_uop_prs1_busy (io_iss_uops_0_prs1_busy),
.io_iss_uop_prs2_busy (io_iss_uops_0_prs2_busy),
.io_iss_uop_prs3_busy (io_iss_uops_0_prs3_busy),
.io_iss_uop_ppred_busy (io_iss_uops_0_ppred_busy),
.io_iss_uop_stale_pdst (io_iss_uops_0_stale_pdst),
.io_iss_uop_exception (io_iss_uops_0_exception),
.io_iss_uop_exc_cause (io_iss_uops_0_exc_cause),
.io_iss_uop_bypassable (io_iss_uops_0_bypassable),
.io_iss_uop_mem_cmd (io_iss_uops_0_mem_cmd),
.io_iss_uop_mem_size (io_iss_uops_0_mem_size),
.io_iss_uop_mem_signed (io_iss_uops_0_mem_signed),
.io_iss_uop_is_fence (io_iss_uops_0_is_fence),
.io_iss_uop_is_fencei (io_iss_uops_0_is_fencei),
.io_iss_uop_is_amo (io_iss_uops_0_is_amo),
.io_iss_uop_uses_ldq (io_iss_uops_0_uses_ldq),
.io_iss_uop_uses_stq (io_iss_uops_0_uses_stq),
.io_iss_uop_is_sys_pc2epc (io_iss_uops_0_is_sys_pc2epc),
.io_iss_uop_is_unique (io_iss_uops_0_is_unique),
.io_iss_uop_flush_on_commit (io_iss_uops_0_flush_on_commit),
.io_iss_uop_ldst_is_rs1 (io_iss_uops_0_ldst_is_rs1),
.io_iss_uop_ldst (io_iss_uops_0_ldst),
.io_iss_uop_lrs1 (io_iss_uops_0_lrs1),
.io_iss_uop_lrs2 (io_iss_uops_0_lrs2),
.io_iss_uop_lrs3 (io_iss_uops_0_lrs3),
.io_iss_uop_ldst_val (io_iss_uops_0_ldst_val),
.io_iss_uop_dst_rtype (io_iss_uops_0_dst_rtype),
.io_iss_uop_lrs1_rtype (io_iss_uops_0_lrs1_rtype),
.io_iss_uop_lrs2_rtype (io_iss_uops_0_lrs2_rtype),
.io_iss_uop_frs3_en (io_iss_uops_0_frs3_en),
.io_iss_uop_fp_val (io_iss_uops_0_fp_val),
.io_iss_uop_fp_single (io_iss_uops_0_fp_single),
.io_iss_uop_xcpt_pf_if (io_iss_uops_0_xcpt_pf_if),
.io_iss_uop_xcpt_ae_if (io_iss_uops_0_xcpt_ae_if),
.io_iss_uop_xcpt_ma_if (io_iss_uops_0_xcpt_ma_if),
.io_iss_uop_bp_debug_if (io_iss_uops_0_bp_debug_if),
.io_iss_uop_bp_xcpt_if (io_iss_uops_0_bp_xcpt_if),
.io_iss_uop_debug_fsrc (io_iss_uops_0_debug_fsrc),
.io_iss_uop_debug_tsrc (io_iss_uops_0_debug_tsrc),
.io_rrd_valid (_rrd_decode_unit_io_rrd_valid),
.io_rrd_uop_uopc (_rrd_decode_unit_io_rrd_uop_uopc),
.io_rrd_uop_inst (_rrd_decode_unit_io_rrd_uop_inst),
.io_rrd_uop_debug_inst (_rrd_decode_unit_io_rrd_uop_debug_inst),
.io_rrd_uop_is_rvc (_rrd_decode_unit_io_rrd_uop_is_rvc),
.io_rrd_uop_debug_pc (_rrd_decode_unit_io_rrd_uop_debug_pc),
.io_rrd_uop_iq_type (_rrd_decode_unit_io_rrd_uop_iq_type),
.io_rrd_uop_fu_code (_rrd_decode_unit_io_rrd_uop_fu_code),
.io_rrd_uop_ctrl_br_type (_rrd_decode_unit_io_rrd_uop_ctrl_br_type),
.io_rrd_uop_ctrl_op1_sel (_rrd_decode_unit_io_rrd_uop_ctrl_op1_sel),
.io_rrd_uop_ctrl_op2_sel (_rrd_decode_unit_io_rrd_uop_ctrl_op2_sel),
.io_rrd_uop_ctrl_imm_sel (_rrd_decode_unit_io_rrd_uop_ctrl_imm_sel),
.io_rrd_uop_ctrl_op_fcn (_rrd_decode_unit_io_rrd_uop_ctrl_op_fcn),
.io_rrd_uop_ctrl_fcn_dw (_rrd_decode_unit_io_rrd_uop_ctrl_fcn_dw),
.io_rrd_uop_ctrl_csr_cmd (_rrd_decode_unit_io_rrd_uop_ctrl_csr_cmd),
.io_rrd_uop_ctrl_is_load (_rrd_decode_unit_io_rrd_uop_ctrl_is_load),
.io_rrd_uop_ctrl_is_sta (_rrd_decode_unit_io_rrd_uop_ctrl_is_sta),
.io_rrd_uop_ctrl_is_std (_rrd_decode_unit_io_rrd_uop_ctrl_is_std),
.io_rrd_uop_iw_state (_rrd_decode_unit_io_rrd_uop_iw_state),
.io_rrd_uop_is_br (_rrd_decode_unit_io_rrd_uop_is_br),
.io_rrd_uop_is_jalr (_rrd_decode_unit_io_rrd_uop_is_jalr),
.io_rrd_uop_is_jal (_rrd_decode_unit_io_rrd_uop_is_jal),
.io_rrd_uop_is_sfb (_rrd_decode_unit_io_rrd_uop_is_sfb),
.io_rrd_uop_br_mask (_rrd_decode_unit_io_rrd_uop_br_mask),
.io_rrd_uop_br_tag (_rrd_decode_unit_io_rrd_uop_br_tag),
.io_rrd_uop_ftq_idx (_rrd_decode_unit_io_rrd_uop_ftq_idx),
.io_rrd_uop_edge_inst (_rrd_decode_unit_io_rrd_uop_edge_inst),
.io_rrd_uop_pc_lob (_rrd_decode_unit_io_rrd_uop_pc_lob),
.io_rrd_uop_taken (_rrd_decode_unit_io_rrd_uop_taken),
.io_rrd_uop_imm_packed (_rrd_decode_unit_io_rrd_uop_imm_packed),
.io_rrd_uop_csr_addr (_rrd_decode_unit_io_rrd_uop_csr_addr),
.io_rrd_uop_rob_idx (_rrd_decode_unit_io_rrd_uop_rob_idx),
.io_rrd_uop_ldq_idx (_rrd_decode_unit_io_rrd_uop_ldq_idx),
.io_rrd_uop_stq_idx (_rrd_decode_unit_io_rrd_uop_stq_idx),
.io_rrd_uop_rxq_idx (_rrd_decode_unit_io_rrd_uop_rxq_idx),
.io_rrd_uop_pdst (_rrd_decode_unit_io_rrd_uop_pdst),
.io_rrd_uop_prs1 (_rrd_decode_unit_io_rrd_uop_prs1),
.io_rrd_uop_prs2 (_rrd_decode_unit_io_rrd_uop_prs2),
.io_rrd_uop_prs3 (_rrd_decode_unit_io_rrd_uop_prs3),
.io_rrd_uop_ppred (_rrd_decode_unit_io_rrd_uop_ppred),
.io_rrd_uop_prs1_busy (_rrd_decode_unit_io_rrd_uop_prs1_busy),
.io_rrd_uop_prs2_busy (_rrd_decode_unit_io_rrd_uop_prs2_busy),
.io_rrd_uop_prs3_busy (_rrd_decode_unit_io_rrd_uop_prs3_busy),
.io_rrd_uop_ppred_busy (_rrd_decode_unit_io_rrd_uop_ppred_busy),
.io_rrd_uop_stale_pdst (_rrd_decode_unit_io_rrd_uop_stale_pdst),
.io_rrd_uop_exception (_rrd_decode_unit_io_rrd_uop_exception),
.io_rrd_uop_exc_cause (_rrd_decode_unit_io_rrd_uop_exc_cause),
.io_rrd_uop_bypassable (_rrd_decode_unit_io_rrd_uop_bypassable),
.io_rrd_uop_mem_cmd (_rrd_decode_unit_io_rrd_uop_mem_cmd),
.io_rrd_uop_mem_size (_rrd_decode_unit_io_rrd_uop_mem_size),
.io_rrd_uop_mem_signed (_rrd_decode_unit_io_rrd_uop_mem_signed),
.io_rrd_uop_is_fence (_rrd_decode_unit_io_rrd_uop_is_fence),
.io_rrd_uop_is_fencei (_rrd_decode_unit_io_rrd_uop_is_fencei),
.io_rrd_uop_is_amo (_rrd_decode_unit_io_rrd_uop_is_amo),
.io_rrd_uop_uses_ldq (_rrd_decode_unit_io_rrd_uop_uses_ldq),
.io_rrd_uop_uses_stq (_rrd_decode_unit_io_rrd_uop_uses_stq),
.io_rrd_uop_is_sys_pc2epc (_rrd_decode_unit_io_rrd_uop_is_sys_pc2epc),
.io_rrd_uop_is_unique (_rrd_decode_unit_io_rrd_uop_is_unique),
.io_rrd_uop_flush_on_commit (_rrd_decode_unit_io_rrd_uop_flush_on_commit),
.io_rrd_uop_ldst_is_rs1 (_rrd_decode_unit_io_rrd_uop_ldst_is_rs1),
.io_rrd_uop_ldst (_rrd_decode_unit_io_rrd_uop_ldst),
.io_rrd_uop_lrs1 (_rrd_decode_unit_io_rrd_uop_lrs1),
.io_rrd_uop_lrs2 (_rrd_decode_unit_io_rrd_uop_lrs2),
.io_rrd_uop_lrs3 (_rrd_decode_unit_io_rrd_uop_lrs3),
.io_rrd_uop_ldst_val (_rrd_decode_unit_io_rrd_uop_ldst_val),
.io_rrd_uop_dst_rtype (_rrd_decode_unit_io_rrd_uop_dst_rtype),
.io_rrd_uop_lrs1_rtype (_rrd_decode_unit_io_rrd_uop_lrs1_rtype),
.io_rrd_uop_lrs2_rtype (_rrd_decode_unit_io_rrd_uop_lrs2_rtype),
.io_rrd_uop_frs3_en (_rrd_decode_unit_io_rrd_uop_frs3_en),
.io_rrd_uop_fp_val (_rrd_decode_unit_io_rrd_uop_fp_val),
.io_rrd_uop_fp_single (_rrd_decode_unit_io_rrd_uop_fp_single),
.io_rrd_uop_xcpt_pf_if (_rrd_decode_unit_io_rrd_uop_xcpt_pf_if),
.io_rrd_uop_xcpt_ae_if (_rrd_decode_unit_io_rrd_uop_xcpt_ae_if),
.io_rrd_uop_xcpt_ma_if (_rrd_decode_unit_io_rrd_uop_xcpt_ma_if),
.io_rrd_uop_bp_debug_if (_rrd_decode_unit_io_rrd_uop_bp_debug_if),
.io_rrd_uop_bp_xcpt_if (_rrd_decode_unit_io_rrd_uop_bp_xcpt_if),
.io_rrd_uop_debug_fsrc (_rrd_decode_unit_io_rrd_uop_debug_fsrc),
.io_rrd_uop_debug_tsrc (_rrd_decode_unit_io_rrd_uop_debug_tsrc)
);
assign io_rf_read_ports_0_addr = io_iss_uops_0_prs1;
assign io_rf_read_ports_1_addr = io_iss_uops_0_prs2;
assign io_rf_read_ports_2_addr = io_iss_uops_0_prs3;
assign io_exe_reqs_0_valid = exe_reg_valids_0;
assign io_exe_reqs_0_bits_uop_uopc = exe_reg_uops_0_uopc;
assign io_exe_reqs_0_bits_uop_inst = exe_reg_uops_0_inst;
assign io_exe_reqs_0_bits_uop_debug_inst = exe_reg_uops_0_debug_inst;
assign io_exe_reqs_0_bits_uop_is_rvc = exe_reg_uops_0_is_rvc;
assign io_exe_reqs_0_bits_uop_debug_pc = exe_reg_uops_0_debug_pc;
assign io_exe_reqs_0_bits_uop_iq_type = exe_reg_uops_0_iq_type;
assign io_exe_reqs_0_bits_uop_fu_code = exe_reg_uops_0_fu_code;
assign io_exe_reqs_0_bits_uop_ctrl_br_type = exe_reg_uops_0_ctrl_br_type;
assign io_exe_reqs_0_bits_uop_ctrl_op1_sel = exe_reg_uops_0_ctrl_op1_sel;
assign io_exe_reqs_0_bits_uop_ctrl_op2_sel = exe_reg_uops_0_ctrl_op2_sel;
assign io_exe_reqs_0_bits_uop_ctrl_imm_sel = exe_reg_uops_0_ctrl_imm_sel;
assign io_exe_reqs_0_bits_uop_ctrl_op_fcn = exe_reg_uops_0_ctrl_op_fcn;
assign io_exe_reqs_0_bits_uop_ctrl_fcn_dw = exe_reg_uops_0_ctrl_fcn_dw;
assign io_exe_reqs_0_bits_uop_ctrl_csr_cmd = exe_reg_uops_0_ctrl_csr_cmd;
assign io_exe_reqs_0_bits_uop_ctrl_is_load = exe_reg_uops_0_ctrl_is_load;
assign io_exe_reqs_0_bits_uop_ctrl_is_sta = exe_reg_uops_0_ctrl_is_sta;
assign io_exe_reqs_0_bits_uop_ctrl_is_std = exe_reg_uops_0_ctrl_is_std;
assign io_exe_reqs_0_bits_uop_iw_state = exe_reg_uops_0_iw_state;
assign io_exe_reqs_0_bits_uop_iw_p1_poisoned = exe_reg_uops_0_iw_p1_poisoned;
assign io_exe_reqs_0_bits_uop_iw_p2_poisoned = exe_reg_uops_0_iw_p2_poisoned;
assign io_exe_reqs_0_bits_uop_is_br = exe_reg_uops_0_is_br;
assign io_exe_reqs_0_bits_uop_is_jalr = exe_reg_uops_0_is_jalr;
assign io_exe_reqs_0_bits_uop_is_jal = exe_reg_uops_0_is_jal;
assign io_exe_reqs_0_bits_uop_is_sfb = exe_reg_uops_0_is_sfb;
assign io_exe_reqs_0_bits_uop_br_mask = exe_reg_uops_0_br_mask;
assign io_exe_reqs_0_bits_uop_br_tag = exe_reg_uops_0_br_tag;
assign io_exe_reqs_0_bits_uop_ftq_idx = exe_reg_uops_0_ftq_idx;
assign io_exe_reqs_0_bits_uop_edge_inst = exe_reg_uops_0_edge_inst;
assign io_exe_reqs_0_bits_uop_pc_lob = exe_reg_uops_0_pc_lob;
assign io_exe_reqs_0_bits_uop_taken = exe_reg_uops_0_taken;
assign io_exe_reqs_0_bits_uop_imm_packed = exe_reg_uops_0_imm_packed;
assign io_exe_reqs_0_bits_uop_csr_addr = exe_reg_uops_0_csr_addr;
assign io_exe_reqs_0_bits_uop_rob_idx = exe_reg_uops_0_rob_idx;
assign io_exe_reqs_0_bits_uop_ldq_idx = exe_reg_uops_0_ldq_idx;
assign io_exe_reqs_0_bits_uop_stq_idx = exe_reg_uops_0_stq_idx;
assign io_exe_reqs_0_bits_uop_rxq_idx = exe_reg_uops_0_rxq_idx;
assign io_exe_reqs_0_bits_uop_pdst = exe_reg_uops_0_pdst;
assign io_exe_reqs_0_bits_uop_prs1 = exe_reg_uops_0_prs1;
assign io_exe_reqs_0_bits_uop_prs2 = exe_reg_uops_0_prs2;
assign io_exe_reqs_0_bits_uop_prs3 = exe_reg_uops_0_prs3;
assign io_exe_reqs_0_bits_uop_ppred = exe_reg_uops_0_ppred;
assign io_exe_reqs_0_bits_uop_prs1_busy = exe_reg_uops_0_prs1_busy;
assign io_exe_reqs_0_bits_uop_prs2_busy = exe_reg_uops_0_prs2_busy;
assign io_exe_reqs_0_bits_uop_prs3_busy = exe_reg_uops_0_prs3_busy;
assign io_exe_reqs_0_bits_uop_ppred_busy = exe_reg_uops_0_ppred_busy;
assign io_exe_reqs_0_bits_uop_stale_pdst = exe_reg_uops_0_stale_pdst;
assign io_exe_reqs_0_bits_uop_exception = exe_reg_uops_0_exception;
assign io_exe_reqs_0_bits_uop_exc_cause = exe_reg_uops_0_exc_cause;
assign io_exe_reqs_0_bits_uop_bypassable = exe_reg_uops_0_bypassable;
assign io_exe_reqs_0_bits_uop_mem_cmd = exe_reg_uops_0_mem_cmd;
assign io_exe_reqs_0_bits_uop_mem_size = exe_reg_uops_0_mem_size;
assign io_exe_reqs_0_bits_uop_mem_signed = exe_reg_uops_0_mem_signed;
assign io_exe_reqs_0_bits_uop_is_fence = exe_reg_uops_0_is_fence;
assign io_exe_reqs_0_bits_uop_is_fencei = exe_reg_uops_0_is_fencei;
assign io_exe_reqs_0_bits_uop_is_amo = exe_reg_uops_0_is_amo;
assign io_exe_reqs_0_bits_uop_uses_ldq = exe_reg_uops_0_uses_ldq;
assign io_exe_reqs_0_bits_uop_uses_stq = exe_reg_uops_0_uses_stq;
assign io_exe_reqs_0_bits_uop_is_sys_pc2epc = exe_reg_uops_0_is_sys_pc2epc;
assign io_exe_reqs_0_bits_uop_is_unique = exe_reg_uops_0_is_unique;
assign io_exe_reqs_0_bits_uop_flush_on_commit = exe_reg_uops_0_flush_on_commit;
assign io_exe_reqs_0_bits_uop_ldst_is_rs1 = exe_reg_uops_0_ldst_is_rs1;
assign io_exe_reqs_0_bits_uop_ldst = exe_reg_uops_0_ldst;
assign io_exe_reqs_0_bits_uop_lrs1 = exe_reg_uops_0_lrs1;
assign io_exe_reqs_0_bits_uop_lrs2 = exe_reg_uops_0_lrs2;
assign io_exe_reqs_0_bits_uop_lrs3 = exe_reg_uops_0_lrs3;
assign io_exe_reqs_0_bits_uop_ldst_val = exe_reg_uops_0_ldst_val;
assign io_exe_reqs_0_bits_uop_dst_rtype = exe_reg_uops_0_dst_rtype;
assign io_exe_reqs_0_bits_uop_lrs1_rtype = exe_reg_uops_0_lrs1_rtype;
assign io_exe_reqs_0_bits_uop_lrs2_rtype = exe_reg_uops_0_lrs2_rtype;
assign io_exe_reqs_0_bits_uop_frs3_en = exe_reg_uops_0_frs3_en;
assign io_exe_reqs_0_bits_uop_fp_val = exe_reg_uops_0_fp_val;
assign io_exe_reqs_0_bits_uop_fp_single = exe_reg_uops_0_fp_single;
assign io_exe_reqs_0_bits_uop_xcpt_pf_if = exe_reg_uops_0_xcpt_pf_if;
assign io_exe_reqs_0_bits_uop_xcpt_ae_if = exe_reg_uops_0_xcpt_ae_if;
assign io_exe_reqs_0_bits_uop_xcpt_ma_if = exe_reg_uops_0_xcpt_ma_if;
assign io_exe_reqs_0_bits_uop_bp_debug_if = exe_reg_uops_0_bp_debug_if;
assign io_exe_reqs_0_bits_uop_bp_xcpt_if = exe_reg_uops_0_bp_xcpt_if;
assign io_exe_reqs_0_bits_uop_debug_fsrc = exe_reg_uops_0_debug_fsrc;
assign io_exe_reqs_0_bits_uop_debug_tsrc = exe_reg_uops_0_debug_tsrc;
assign io_exe_reqs_0_bits_rs1_data = exe_reg_rs1_data_0;
assign io_exe_reqs_0_bits_rs2_data = exe_reg_rs2_data_0;
assign io_exe_reqs_0_bits_rs3_data = exe_reg_rs3_data_0;
endmodule |
Generate the Verilog code corresponding to this Chisel code //******************************************************************************
// Copyright (c) 2012 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Datapath: Rename Logic
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Supports 1-cycle and 2-cycle latencies. (aka, passthrough versus registers between ren1 and ren2).
// - ren1: read the map tables and allocate a new physical register from the freelist.
// - ren2: read the busy table for the physical operands.
//
// Ren1 data is provided as an output to be fed directly into the ROB.
package boom.v3.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v3.common._
import boom.v3.util._
/**
 * IO bundle to interface with the Register Rename logic
 *
 * @param plWidth pipeline width
 * @param numPhysRegs number of physical registers
 * @param numWbPorts number of writeback ports
 */
// Empty marker bundle: the rename stage declares its ports inline in
// AbstractRenameStage.io, so this class currently carries no fields.
// Retained (with its parameters) for interface compatibility.
class RenameStageIO(
  val plWidth: Int,
  val numPhysRegs: Int,
  val numWbPorts: Int)
  (implicit p: Parameters) extends BoomBundle
/**
* IO bundle to debug the rename stage
*/
class DebugRenameStageIO(val numPhysRegs: Int)(implicit p: Parameters) extends BoomBundle
{
  val freelist = Bits(numPhysRegs.W)  // snapshot of the free list, one bit per physical register
  val isprlist = Bits(numPhysRegs.W)  // one bit per physical register; NOTE(review): exact semantics not visible in this file — confirm against the free-list implementation
  val busytable = UInt(numPhysRegs.W) // snapshot of the busy table, one bit per physical register
}
/**
 * Common skeleton for a two-stage rename pipeline.
 *
 * - ren1: uops arriving from decode (combinational passthrough of dec_fire/dec_uops).
 * - ren2: uops held in a pipeline register until dispatch accepts them.
 *
 * Concrete subclasses supply BypassAllocations, which forwards physical-register
 * allocations made by older uops in the same rename group.
 *
 * @param plWidth pipeline width
 * @param numPhysRegs number of physical registers
 * @param numWbPorts number of writeback (wakeup) ports
 */
abstract class AbstractRenameStage(
  plWidth: Int,
  numPhysRegs: Int,
  numWbPorts: Int)
  (implicit p: Parameters) extends BoomModule
{
  val io = IO(new Bundle {
    val ren_stalls = Output(Vec(plWidth, Bool()))
    val kill = Input(Bool())
    val dec_fire = Input(Vec(plWidth, Bool())) // will commit state updates
    val dec_uops = Input(Vec(plWidth, new MicroOp()))
    // physical specifiers available AND busy/ready status available.
    val ren2_mask = Vec(plWidth, Output(Bool())) // mask of valid instructions
    val ren2_uops = Vec(plWidth, Output(new MicroOp()))
    // branch resolution (execute)
    val brupdate = Input(new BrUpdateInfo())
    val dis_fire = Input(Vec(coreWidth, Bool()))
    val dis_ready = Input(Bool())
    // wakeup ports
    val wakeups = Flipped(Vec(numWbPorts, Valid(new ExeUnitResp(xLen))))
    // commit stage
    val com_valids = Input(Vec(plWidth, Bool()))
    val com_uops = Input(Vec(plWidth, new MicroOp()))
    val rbk_valids = Input(Vec(plWidth, Bool()))
    val rollback = Input(Bool())
    val debug_rob_empty = Input(Bool())
    val debug = Output(new DebugRenameStageIO(numPhysRegs))
  })
  // Defaults; subclasses override via Chisel last-connect semantics.
  io.ren_stalls.foreach(_ := false.B)
  io.debug := DontCare

  // Forward pdst allocations from older, same-group uops into `uop`'s
  // physical specifiers. Implemented by the concrete rename stage.
  def BypassAllocations(uop: MicroOp, older_uops: Seq[MicroOp], alloc_reqs: Seq[Bool]): MicroOp

  //-------------------------------------------------------------
  // Pipeline State & Wires

  // Stage 1
  val ren1_fire = Wire(Vec(plWidth, Bool()))
  val ren1_uops = Wire(Vec(plWidth, new MicroOp))

  // Stage 2
  val ren2_fire = io.dis_fire
  val ren2_ready = io.dis_ready
  val ren2_valids = Wire(Vec(plWidth, Bool()))
  val ren2_uops = Wire(Vec(plWidth, new MicroOp))
  val ren2_alloc_reqs = Wire(Vec(plWidth, Bool()))

  //-------------------------------------------------------------
  // pipeline registers

  // ren1 is a straight passthrough from decode.
  for (w <- 0 until plWidth) {
    ren1_fire(w) := io.dec_fire(w)
    ren1_uops(w) := io.dec_uops(w)
  }

  // ren2 holding registers: one valid bit + uop per lane. A lane's valid bit
  // is cleared on kill, reloaded when dispatch is ready, and otherwise cleared
  // once the uop has been dispatched.
  for (w <- 0 until plWidth) {
    val r_valid = RegInit(false.B)
    val r_uop = Reg(new MicroOp)
    val next_uop = Wire(new MicroOp)
    next_uop := r_uop
    when (io.kill) {
      r_valid := false.B
    } .elsewhen (ren2_ready) {
      r_valid := ren1_fire(w)
      next_uop := ren1_uops(w)
    } .otherwise {
      r_valid := r_valid && !ren2_fire(w) // clear bit if uop gets dispatched
      next_uop := r_uop
    }
    // Re-bypass allocations every cycle (the stalled uop may depend on pregs
    // allocated by still-in-flight ren2 uops) and keep the branch mask current.
    r_uop := GetNewUopAndBrMask(BypassAllocations(next_uop, ren2_uops, ren2_alloc_reqs), io.brupdate)
    ren2_valids(w) := r_valid
    ren2_uops(w) := r_uop
  }

  //-------------------------------------------------------------
  // Outputs

  io.ren2_mask := ren2_valids
}
/**
 * Rename stage that connects the map table, free list, and busy table.
 * Can be used in both the FP pipeline and the normal execute pipeline.
 *
 * @param plWidth pipeline width
 * @param numPhysRegs number of physical registers
 * @param numWbPorts number of writeback (wakeup) ports
 * @param float true if this stage renames the floating-point register file
 */
class RenameStage(
plWidth: Int,
numPhysRegs: Int,
numWbPorts: Int,
float: Boolean)
(implicit p: Parameters) extends AbstractRenameStage(plWidth, numPhysRegs, numWbPorts)(p)
{
val pregSz = log2Ceil(numPhysRegs)
val rtype = if (float) RT_FLT else RT_FIX
//-------------------------------------------------------------
// Helper Functions
  /**
   * Forward physical-register allocations made by older uops in the same
   * rename group into `uop`'s specifiers.
   *
   * For each source (lrs1/lrs2/lrs3) and for the destination's stale mapping
   * (ldst), if an older uop is allocating a new pdst for the same logical
   * register, take that pdst instead of the (stale) map-table read. The
   * reversed PriorityEncoderOH picks the YOUNGEST matching older uop, i.e.
   * the most recent writer of that logical register.
   *
   * @param uop        the uop whose specifiers may need bypassing
   * @param older_uops older uops in the same rename group (program order)
   * @param alloc_reqs per-older-uop flag: it allocates a new pdst this cycle
   * @return a copy of `uop` with bypassed prs1/prs2/prs3/stale_pdst and
   *         busy bits forced on for any bypassed source
   */
  def BypassAllocations(uop: MicroOp, older_uops: Seq[MicroOp], alloc_reqs: Seq[Bool]): MicroOp = {
    val bypassed_uop = Wire(new MicroOp)
    bypassed_uop := uop

    // Hit when an older uop allocates a pdst for the logical register we read/write.
    val bypass_hits_rs1 = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.lrs1 }
    val bypass_hits_rs2 = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.lrs2 }
    val bypass_hits_rs3 = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.lrs3 }
    val bypass_hits_dst = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.ldst }

    // reverse + PriorityEncoderOH + reverse selects the youngest (last) hit.
    val bypass_sel_rs1 = PriorityEncoderOH(bypass_hits_rs1.reverse).reverse
    val bypass_sel_rs2 = PriorityEncoderOH(bypass_hits_rs2.reverse).reverse
    val bypass_sel_rs3 = PriorityEncoderOH(bypass_hits_rs3.reverse).reverse
    val bypass_sel_dst = PriorityEncoderOH(bypass_hits_dst.reverse).reverse

    val do_bypass_rs1 = bypass_hits_rs1.reduce(_||_)
    val do_bypass_rs2 = bypass_hits_rs2.reduce(_||_)
    val do_bypass_rs3 = bypass_hits_rs3.reduce(_||_)
    val do_bypass_dst = bypass_hits_dst.reduce(_||_)

    val bypass_pdsts = older_uops.map(_.pdst)

    // Override the map-table reads with the youngest matching allocation.
    when (do_bypass_rs1) { bypassed_uop.prs1 := Mux1H(bypass_sel_rs1, bypass_pdsts) }
    when (do_bypass_rs2) { bypassed_uop.prs2 := Mux1H(bypass_sel_rs2, bypass_pdsts) }
    when (do_bypass_rs3) { bypassed_uop.prs3 := Mux1H(bypass_sel_rs3, bypass_pdsts) }
    when (do_bypass_dst) { bypassed_uop.stale_pdst := Mux1H(bypass_sel_dst, bypass_pdsts) }

    // A freshly allocated preg has not been written yet, so a bypassed source
    // is necessarily busy.
    bypassed_uop.prs1_busy := uop.prs1_busy || do_bypass_rs1
    bypassed_uop.prs2_busy := uop.prs2_busy || do_bypass_rs2
    bypassed_uop.prs3_busy := uop.prs3_busy || do_bypass_rs3

    if (!float) {
      // Integer pipeline has no third source operand.
      bypassed_uop.prs3 := DontCare
      bypassed_uop.prs3_busy := false.B
    }

    bypassed_uop
  }
//-------------------------------------------------------------
// Rename Structures

// Map table: logical -> physical mappings, with branch-tag snapshots.
val maptable = Module(new RenameMapTable(
  plWidth,
  32,
  numPhysRegs,
  false,
  float))
// Free list: pool of currently unallocated physical registers.
// Integer pipe reserves p0 as the constant zero register, hence 31 allocatable.
val freelist = Module(new RenameFreeList(
  plWidth,
  numPhysRegs,
  if (float) 32 else 31))
// Busy table: physical registers whose producing writeback is still outstanding.
val busytable = Module(new RenameBusyTable(
  plWidth,
  numPhysRegs,
  numWbPorts,
  false,
  float))

// Per-lane branch-tag allocations (snapshot the rename state on branches).
val ren2_br_tags = Wire(Vec(plWidth, Valid(UInt(brTagSz.W))))

// Commit/Rollback
val com_valids = Wire(Vec(plWidth, Bool()))
val rbk_valids = Wire(Vec(plWidth, Bool()))

for (w <- 0 until plWidth) {
  // Request a fresh preg only for firing uops with a valid dest of this pipe's rtype.
  ren2_alloc_reqs(w) := ren2_uops(w).ldst_val && ren2_uops(w).dst_rtype === rtype && ren2_fire(w)
  ren2_br_tags(w).valid := ren2_fire(w) && ren2_uops(w).allocate_brtag

  com_valids(w) := io.com_uops(w).ldst_val && io.com_uops(w).dst_rtype === rtype && io.com_valids(w)
  rbk_valids(w) := io.com_uops(w).ldst_val && io.com_uops(w).dst_rtype === rtype && io.rbk_valids(w)
  ren2_br_tags(w).bits := ren2_uops(w).br_tag
}

//-------------------------------------------------------------
// Rename Table

// Maptable inputs.
val map_reqs = Wire(Vec(plWidth, new MapReq(lregSz)))
val remap_reqs = Wire(Vec(plWidth, new RemapReq(lregSz, pregSz)))

// Generate maptable requests.
// Rollback walks committed uops in REVERSE program order, restoring each
// destination's stale mapping; normal operation installs the new pdst.
for ((((ren1,ren2),com),w) <- (ren1_uops zip ren2_uops zip io.com_uops.reverse).zipWithIndex) {
  map_reqs(w).lrs1 := ren1.lrs1
  map_reqs(w).lrs2 := ren1.lrs2
  map_reqs(w).lrs3 := ren1.lrs3
  map_reqs(w).ldst := ren1.ldst

  remap_reqs(w).ldst := Mux(io.rollback, com.ldst , ren2.ldst)
  remap_reqs(w).pdst := Mux(io.rollback, com.stale_pdst, ren2.pdst)
}
// A remap is valid either on a new allocation or on a rollback restore.
ren2_alloc_reqs zip rbk_valids.reverse zip remap_reqs map {
  case ((a,r),rr) => rr.valid := a || r}

// Hook up inputs.
maptable.io.map_reqs := map_reqs
maptable.io.remap_reqs := remap_reqs
maptable.io.ren_br_tags := ren2_br_tags
maptable.io.brupdate := io.brupdate
maptable.io.rollback := io.rollback

// Maptable outputs.
for ((uop, w) <- ren1_uops.zipWithIndex) {
  val mappings = maptable.io.map_resps(w)

  uop.prs1 := mappings.prs1
  uop.prs2 := mappings.prs2
  uop.prs3 := mappings.prs3 // only FP has 3rd operand
  uop.stale_pdst := mappings.stale_pdst
}

//-------------------------------------------------------------
// Free List

// Freelist inputs.
freelist.io.reqs := ren2_alloc_reqs
// A preg is returned either when its overwriter commits (stale copy dies)
// or when the allocating uop itself is rolled back (new copy dies).
freelist.io.dealloc_pregs zip com_valids zip rbk_valids map
  {case ((d,c),r) => d.valid := c || r}
freelist.io.dealloc_pregs zip io.com_uops map
  {case (d,c) => d.bits := Mux(io.rollback, c.pdst, c.stale_pdst)}
freelist.io.ren_br_tags := ren2_br_tags
freelist.io.brupdate := io.brupdate
freelist.io.debug.pipeline_empty := io.debug_rob_empty

assert (ren2_alloc_reqs zip freelist.io.alloc_pregs map {case (r,p) => !r || p.bits =/= 0.U} reduce (_&&_),
  "[rename-stage] A uop is trying to allocate the zero physical register.")

// Freelist outputs.
for ((uop, w) <- ren2_uops.zipWithIndex) {
  val preg = freelist.io.alloc_pregs(w).bits
  // Integer x0 is hard-wired to p0; FP has no zero register.
  uop.pdst := Mux(uop.ldst =/= 0.U || float.B, preg, 0.U)
}

//-------------------------------------------------------------
// Busy Table

busytable.io.ren_uops := ren2_uops  // expects pdst to be set up.
busytable.io.rebusy_reqs := ren2_alloc_reqs
busytable.io.wb_valids := io.wakeups.map(_.valid)
busytable.io.wb_pdsts := io.wakeups.map(_.bits.uop.pdst)

assert (!(io.wakeups.map(x => x.valid && x.bits.uop.dst_rtype =/= rtype).reduce(_||_)),
  "[rename] Wakeup has wrong rtype.")

for ((uop, w) <- ren2_uops.zipWithIndex) {
  val busy = busytable.io.busy_resps(w)

  // An operand is busy only if it is of this pipe's type AND still unwritten.
  uop.prs1_busy := uop.lrs1_rtype === rtype && busy.prs1_busy
  uop.prs2_busy := uop.lrs2_rtype === rtype && busy.prs2_busy
  uop.prs3_busy := uop.frs3_en && busy.prs3_busy

  val valid = ren2_valids(w)
  assert (!(valid && busy.prs1_busy && rtype === RT_FIX && uop.lrs1 === 0.U), "[rename] x0 is busy??")
  assert (!(valid && busy.prs2_busy && rtype === RT_FIX && uop.lrs2 === 0.U), "[rename] x0 is busy??")
}

//-------------------------------------------------------------
// Outputs

for (w <- 0 until plWidth) {
  val can_allocate = freelist.io.alloc_pregs(w).valid

  // Push back against Decode stage if Rename1 can't proceed.
  io.ren_stalls(w) := (ren2_uops(w).dst_rtype === rtype) && !can_allocate

  // Bypass against all program-order-older lanes renamed this same cycle.
  val bypassed_uop = Wire(new MicroOp)
  if (w > 0) bypassed_uop := BypassAllocations(ren2_uops(w), ren2_uops.slice(0,w), ren2_alloc_reqs.slice(0,w))
  else bypassed_uop := ren2_uops(w)

  io.ren2_uops(w) := GetNewUopAndBrMask(bypassed_uop, io.brupdate)
}

//-------------------------------------------------------------
// Debug signals

io.debug.freelist := freelist.io.debug.freelist
io.debug.isprlist := freelist.io.debug.isprlist
io.debug.busytable := busytable.io.debug.busytable
}
// Renames SFB (short-forward-branch) predicate bits: an SFB branch "writes"
// the predicate slot named by its FTQ index, and shadowed uops read the
// predicate of the most recent SFB branch.
class PredRenameStage(
  plWidth: Int,
  numPhysRegs: Int,
  numWbPorts: Int)
  (implicit p: Parameters) extends AbstractRenameStage(plWidth, numPhysRegs, numWbPorts)(p)
{
  // No cross-lane register hazards for predicates: pass the uop through.
  def BypassAllocations(uop: MicroOp, older_uops: Seq[MicroOp], alloc_reqs: Seq[Bool]): MicroOp = {
    uop
  }

  ren2_alloc_reqs := DontCare

  // One busy bit per FTQ entry: set when an SFB branch claims its predicate
  // slot this cycle (to_busy), cleared on writeback (unbusy).
  val busy_table = RegInit(VecInit(0.U(ftqSz.W).asBools))
  val to_busy = WireInit(VecInit(0.U(ftqSz.W).asBools))
  val unbusy = WireInit(VecInit(0.U(ftqSz.W).asBools))

  val current_ftq_idx = Reg(UInt(log2Ceil(ftqSz).W))
  // Scala-level var threads the latest SFB branch's ftq_idx across lanes
  // (a combinational chain, not mutable hardware state).
  var next_ftq_idx = current_ftq_idx

  for (w <- 0 until plWidth) {
    io.ren2_uops(w) := ren2_uops(w)

    val is_sfb_br = ren2_uops(w).is_sfb_br && ren2_fire(w)
    val is_sfb_shadow = ren2_uops(w).is_sfb_shadow && ren2_fire(w)
    val ftq_idx = ren2_uops(w).ftq_idx
    when (is_sfb_br) {
      // The SFB branch's "destination" is the predicate slot for its ftq_idx.
      io.ren2_uops(w).pdst := ftq_idx
      to_busy(ftq_idx) := true.B
    }
    next_ftq_idx = Mux(is_sfb_br, ftq_idx, next_ftq_idx)

    when (is_sfb_shadow) {
      // Shadowed uop depends on the youngest preceding SFB branch's predicate.
      io.ren2_uops(w).ppred := next_ftq_idx
      io.ren2_uops(w).ppred_busy := (busy_table(next_ftq_idx) || to_busy(next_ftq_idx)) && !unbusy(next_ftq_idx)
    }
  }

  // Writebacks clear the busy bit for the predicate slot they resolve.
  for (w <- 0 until numWbPorts) {
    when (io.wakeups(w).valid) {
      unbusy(io.wakeups(w).bits.uop.pdst) := true.B
    }
  }

  current_ftq_idx := next_ftq_idx

  // Commit this cycle's sets and clears into the busy table.
  busy_table := ((busy_table.asUInt | to_busy.asUInt) & ~unbusy.asUInt).asBools
} | module RenameStage(
input clock,
input reset,
output io_ren_stalls_0,
input io_kill,
input io_dec_fire_0,
input [6:0] io_dec_uops_0_uopc,
input [31:0] io_dec_uops_0_inst,
input [31:0] io_dec_uops_0_debug_inst,
input io_dec_uops_0_is_rvc,
input [39:0] io_dec_uops_0_debug_pc,
input [2:0] io_dec_uops_0_iq_type,
input [9:0] io_dec_uops_0_fu_code,
input io_dec_uops_0_is_br,
input io_dec_uops_0_is_jalr,
input io_dec_uops_0_is_jal,
input io_dec_uops_0_is_sfb,
input [7:0] io_dec_uops_0_br_mask,
input [2:0] io_dec_uops_0_br_tag,
input [3:0] io_dec_uops_0_ftq_idx,
input io_dec_uops_0_edge_inst,
input [5:0] io_dec_uops_0_pc_lob,
input io_dec_uops_0_taken,
input [19:0] io_dec_uops_0_imm_packed,
input io_dec_uops_0_exception,
input [63:0] io_dec_uops_0_exc_cause,
input io_dec_uops_0_bypassable,
input [4:0] io_dec_uops_0_mem_cmd,
input [1:0] io_dec_uops_0_mem_size,
input io_dec_uops_0_mem_signed,
input io_dec_uops_0_is_fence,
input io_dec_uops_0_is_fencei,
input io_dec_uops_0_is_amo,
input io_dec_uops_0_uses_ldq,
input io_dec_uops_0_uses_stq,
input io_dec_uops_0_is_sys_pc2epc,
input io_dec_uops_0_is_unique,
input io_dec_uops_0_flush_on_commit,
input [5:0] io_dec_uops_0_ldst,
input [5:0] io_dec_uops_0_lrs1,
input [5:0] io_dec_uops_0_lrs2,
input [5:0] io_dec_uops_0_lrs3,
input io_dec_uops_0_ldst_val,
input [1:0] io_dec_uops_0_dst_rtype,
input [1:0] io_dec_uops_0_lrs1_rtype,
input [1:0] io_dec_uops_0_lrs2_rtype,
input io_dec_uops_0_frs3_en,
input io_dec_uops_0_fp_val,
input io_dec_uops_0_fp_single,
input io_dec_uops_0_xcpt_pf_if,
input io_dec_uops_0_xcpt_ae_if,
input io_dec_uops_0_bp_debug_if,
input io_dec_uops_0_bp_xcpt_if,
input [1:0] io_dec_uops_0_debug_fsrc,
output io_ren2_mask_0,
output [6:0] io_ren2_uops_0_uopc,
output [31:0] io_ren2_uops_0_inst,
output [31:0] io_ren2_uops_0_debug_inst,
output io_ren2_uops_0_is_rvc,
output [39:0] io_ren2_uops_0_debug_pc,
output [2:0] io_ren2_uops_0_iq_type,
output [9:0] io_ren2_uops_0_fu_code,
output [3:0] io_ren2_uops_0_ctrl_br_type,
output [1:0] io_ren2_uops_0_ctrl_op1_sel,
output [2:0] io_ren2_uops_0_ctrl_op2_sel,
output [2:0] io_ren2_uops_0_ctrl_imm_sel,
output [4:0] io_ren2_uops_0_ctrl_op_fcn,
output io_ren2_uops_0_ctrl_fcn_dw,
output [2:0] io_ren2_uops_0_ctrl_csr_cmd,
output io_ren2_uops_0_ctrl_is_load,
output io_ren2_uops_0_ctrl_is_sta,
output io_ren2_uops_0_ctrl_is_std,
output [1:0] io_ren2_uops_0_iw_state,
output io_ren2_uops_0_iw_p1_poisoned,
output io_ren2_uops_0_iw_p2_poisoned,
output io_ren2_uops_0_is_br,
output io_ren2_uops_0_is_jalr,
output io_ren2_uops_0_is_jal,
output io_ren2_uops_0_is_sfb,
output [7:0] io_ren2_uops_0_br_mask,
output [2:0] io_ren2_uops_0_br_tag,
output [3:0] io_ren2_uops_0_ftq_idx,
output io_ren2_uops_0_edge_inst,
output [5:0] io_ren2_uops_0_pc_lob,
output io_ren2_uops_0_taken,
output [19:0] io_ren2_uops_0_imm_packed,
output [11:0] io_ren2_uops_0_csr_addr,
output [1:0] io_ren2_uops_0_rxq_idx,
output [5:0] io_ren2_uops_0_pdst,
output [5:0] io_ren2_uops_0_prs1,
output [5:0] io_ren2_uops_0_prs2,
output io_ren2_uops_0_prs1_busy,
output io_ren2_uops_0_prs2_busy,
output [5:0] io_ren2_uops_0_stale_pdst,
output io_ren2_uops_0_exception,
output [63:0] io_ren2_uops_0_exc_cause,
output io_ren2_uops_0_bypassable,
output [4:0] io_ren2_uops_0_mem_cmd,
output [1:0] io_ren2_uops_0_mem_size,
output io_ren2_uops_0_mem_signed,
output io_ren2_uops_0_is_fence,
output io_ren2_uops_0_is_fencei,
output io_ren2_uops_0_is_amo,
output io_ren2_uops_0_uses_ldq,
output io_ren2_uops_0_uses_stq,
output io_ren2_uops_0_is_sys_pc2epc,
output io_ren2_uops_0_is_unique,
output io_ren2_uops_0_flush_on_commit,
output io_ren2_uops_0_ldst_is_rs1,
output [5:0] io_ren2_uops_0_ldst,
output [5:0] io_ren2_uops_0_lrs1,
output [5:0] io_ren2_uops_0_lrs2,
output [5:0] io_ren2_uops_0_lrs3,
output io_ren2_uops_0_ldst_val,
output [1:0] io_ren2_uops_0_dst_rtype,
output [1:0] io_ren2_uops_0_lrs1_rtype,
output [1:0] io_ren2_uops_0_lrs2_rtype,
output io_ren2_uops_0_frs3_en,
output io_ren2_uops_0_fp_val,
output io_ren2_uops_0_fp_single,
output io_ren2_uops_0_xcpt_pf_if,
output io_ren2_uops_0_xcpt_ae_if,
output io_ren2_uops_0_xcpt_ma_if,
output io_ren2_uops_0_bp_debug_if,
output io_ren2_uops_0_bp_xcpt_if,
output [1:0] io_ren2_uops_0_debug_fsrc,
output [1:0] io_ren2_uops_0_debug_tsrc,
input [7:0] io_brupdate_b1_resolve_mask,
input [2:0] io_brupdate_b2_uop_br_tag,
input io_brupdate_b2_mispredict,
input io_dis_fire_0,
input io_dis_ready,
input io_wakeups_0_valid,
input [5:0] io_wakeups_0_bits_uop_pdst,
input [1:0] io_wakeups_0_bits_uop_dst_rtype,
input io_wakeups_1_valid,
input [5:0] io_wakeups_1_bits_uop_pdst,
input [1:0] io_wakeups_1_bits_uop_dst_rtype,
input io_wakeups_2_valid,
input [5:0] io_wakeups_2_bits_uop_pdst,
input [1:0] io_wakeups_2_bits_uop_dst_rtype,
input io_com_valids_0,
input [5:0] io_com_uops_0_pdst,
input [5:0] io_com_uops_0_stale_pdst,
input [5:0] io_com_uops_0_ldst,
input io_com_uops_0_ldst_val,
input [1:0] io_com_uops_0_dst_rtype,
input io_rbk_valids_0,
input io_rollback,
input io_debug_rob_empty
);
wire [5:0] bypassed_uop_pdst;
wire _busytable_io_busy_resps_0_prs1_busy;
wire _busytable_io_busy_resps_0_prs2_busy;
wire _freelist_io_alloc_pregs_0_valid;
wire [5:0] _freelist_io_alloc_pregs_0_bits;
wire [5:0] _maptable_io_map_resps_0_prs1;
wire [5:0] _maptable_io_map_resps_0_prs2;
wire [5:0] _maptable_io_map_resps_0_stale_pdst;
reg r_valid;
reg [6:0] r_uop_uopc;
reg [31:0] r_uop_inst;
reg [31:0] r_uop_debug_inst;
reg r_uop_is_rvc;
reg [39:0] r_uop_debug_pc;
reg [2:0] r_uop_iq_type;
reg [9:0] r_uop_fu_code;
reg [3:0] r_uop_ctrl_br_type;
reg [1:0] r_uop_ctrl_op1_sel;
reg [2:0] r_uop_ctrl_op2_sel;
reg [2:0] r_uop_ctrl_imm_sel;
reg [4:0] r_uop_ctrl_op_fcn;
reg r_uop_ctrl_fcn_dw;
reg [2:0] r_uop_ctrl_csr_cmd;
reg r_uop_ctrl_is_load;
reg r_uop_ctrl_is_sta;
reg r_uop_ctrl_is_std;
reg [1:0] r_uop_iw_state;
reg r_uop_iw_p1_poisoned;
reg r_uop_iw_p2_poisoned;
reg r_uop_is_br;
reg r_uop_is_jalr;
reg r_uop_is_jal;
reg r_uop_is_sfb;
reg [7:0] r_uop_br_mask;
reg [2:0] r_uop_br_tag;
reg [3:0] r_uop_ftq_idx;
reg r_uop_edge_inst;
reg [5:0] r_uop_pc_lob;
reg r_uop_taken;
reg [19:0] r_uop_imm_packed;
reg [11:0] r_uop_csr_addr;
reg [1:0] r_uop_rxq_idx;
reg [5:0] r_uop_prs1;
reg [5:0] r_uop_prs2;
reg [5:0] r_uop_stale_pdst;
reg r_uop_exception;
reg [63:0] r_uop_exc_cause;
reg r_uop_bypassable;
reg [4:0] r_uop_mem_cmd;
reg [1:0] r_uop_mem_size;
reg r_uop_mem_signed;
reg r_uop_is_fence;
reg r_uop_is_fencei;
reg r_uop_is_amo;
reg r_uop_uses_ldq;
reg r_uop_uses_stq;
reg r_uop_is_sys_pc2epc;
reg r_uop_is_unique;
reg r_uop_flush_on_commit;
reg r_uop_ldst_is_rs1;
reg [5:0] r_uop_ldst;
reg [5:0] r_uop_lrs1;
reg [5:0] r_uop_lrs2;
reg [5:0] r_uop_lrs3;
reg r_uop_ldst_val;
reg [1:0] r_uop_dst_rtype;
reg [1:0] r_uop_lrs1_rtype;
reg [1:0] r_uop_lrs2_rtype;
reg r_uop_frs3_en;
reg r_uop_fp_val;
reg r_uop_fp_single;
reg r_uop_xcpt_pf_if;
reg r_uop_xcpt_ae_if;
reg r_uop_xcpt_ma_if;
reg r_uop_bp_debug_if;
reg r_uop_bp_xcpt_if;
reg [1:0] r_uop_debug_fsrc;
reg [1:0] r_uop_debug_tsrc;
wire _io_ren_stalls_0_T = r_uop_dst_rtype == 2'h0;
wire freelist_io_reqs_0 = r_uop_ldst_val & _io_ren_stalls_0_T & io_dis_fire_0;
wire ren2_br_tags_0_valid = io_dis_fire_0 & (r_uop_is_br & ~r_uop_is_sfb | r_uop_is_jalr);
wire _rbk_valids_0_T = io_com_uops_0_dst_rtype == 2'h0;
wire rbk_valids_0 = io_com_uops_0_ldst_val & _rbk_valids_0_T & io_rbk_valids_0;
assign bypassed_uop_pdst = (|r_uop_ldst) ? _freelist_io_alloc_pregs_0_bits : 6'h0;
wire _GEN = io_kill | ~io_dis_ready;
always @(posedge clock) begin
if (reset)
r_valid <= 1'h0;
else
r_valid <= ~io_kill & (io_dis_ready ? io_dec_fire_0 : r_valid & ~io_dis_fire_0);
if (_GEN) begin
end
else begin
r_uop_uopc <= io_dec_uops_0_uopc;
r_uop_inst <= io_dec_uops_0_inst;
r_uop_debug_inst <= io_dec_uops_0_debug_inst;
r_uop_is_rvc <= io_dec_uops_0_is_rvc;
r_uop_debug_pc <= io_dec_uops_0_debug_pc;
r_uop_iq_type <= io_dec_uops_0_iq_type;
r_uop_fu_code <= io_dec_uops_0_fu_code;
r_uop_ctrl_br_type <= 4'h0;
r_uop_ctrl_op1_sel <= 2'h0;
r_uop_ctrl_op2_sel <= 3'h0;
r_uop_ctrl_imm_sel <= 3'h0;
r_uop_ctrl_op_fcn <= 5'h0;
end
r_uop_ctrl_fcn_dw <= _GEN & r_uop_ctrl_fcn_dw;
if (_GEN) begin
end
else
r_uop_ctrl_csr_cmd <= 3'h0;
r_uop_ctrl_is_load <= _GEN & r_uop_ctrl_is_load;
r_uop_ctrl_is_sta <= _GEN & r_uop_ctrl_is_sta;
r_uop_ctrl_is_std <= _GEN & r_uop_ctrl_is_std;
if (_GEN) begin
end
else
r_uop_iw_state <= 2'h0;
r_uop_iw_p1_poisoned <= _GEN & r_uop_iw_p1_poisoned;
r_uop_iw_p2_poisoned <= _GEN & r_uop_iw_p2_poisoned;
if (_GEN) begin
end
else begin
r_uop_is_br <= io_dec_uops_0_is_br;
r_uop_is_jalr <= io_dec_uops_0_is_jalr;
r_uop_is_jal <= io_dec_uops_0_is_jal;
r_uop_is_sfb <= io_dec_uops_0_is_sfb;
end
r_uop_br_mask <= (_GEN ? r_uop_br_mask : io_dec_uops_0_br_mask) & ~io_brupdate_b1_resolve_mask;
if (_GEN) begin
end
else begin
r_uop_br_tag <= io_dec_uops_0_br_tag;
r_uop_ftq_idx <= io_dec_uops_0_ftq_idx;
r_uop_edge_inst <= io_dec_uops_0_edge_inst;
r_uop_pc_lob <= io_dec_uops_0_pc_lob;
r_uop_taken <= io_dec_uops_0_taken;
r_uop_imm_packed <= io_dec_uops_0_imm_packed;
r_uop_csr_addr <= 12'h0;
r_uop_rxq_idx <= 2'h0;
end
if (freelist_io_reqs_0 & r_uop_ldst == (_GEN ? r_uop_lrs1 : io_dec_uops_0_lrs1))
r_uop_prs1 <= bypassed_uop_pdst;
else if (_GEN) begin
end
else
r_uop_prs1 <= _maptable_io_map_resps_0_prs1;
if (freelist_io_reqs_0 & r_uop_ldst == (_GEN ? r_uop_lrs2 : io_dec_uops_0_lrs2))
r_uop_prs2 <= bypassed_uop_pdst;
else if (_GEN) begin
end
else
r_uop_prs2 <= _maptable_io_map_resps_0_prs2;
if (freelist_io_reqs_0 & r_uop_ldst == (_GEN ? r_uop_ldst : io_dec_uops_0_ldst))
r_uop_stale_pdst <= bypassed_uop_pdst;
else if (_GEN) begin
end
else
r_uop_stale_pdst <= _maptable_io_map_resps_0_stale_pdst;
if (_GEN) begin
end
else begin
r_uop_exception <= io_dec_uops_0_exception;
r_uop_exc_cause <= io_dec_uops_0_exc_cause;
r_uop_bypassable <= io_dec_uops_0_bypassable;
r_uop_mem_cmd <= io_dec_uops_0_mem_cmd;
r_uop_mem_size <= io_dec_uops_0_mem_size;
r_uop_mem_signed <= io_dec_uops_0_mem_signed;
r_uop_is_fence <= io_dec_uops_0_is_fence;
r_uop_is_fencei <= io_dec_uops_0_is_fencei;
r_uop_is_amo <= io_dec_uops_0_is_amo;
r_uop_uses_ldq <= io_dec_uops_0_uses_ldq;
r_uop_uses_stq <= io_dec_uops_0_uses_stq;
r_uop_is_sys_pc2epc <= io_dec_uops_0_is_sys_pc2epc;
r_uop_is_unique <= io_dec_uops_0_is_unique;
r_uop_flush_on_commit <= io_dec_uops_0_flush_on_commit;
end
r_uop_ldst_is_rs1 <= _GEN & r_uop_ldst_is_rs1;
if (_GEN) begin
end
else begin
r_uop_ldst <= io_dec_uops_0_ldst;
r_uop_lrs1 <= io_dec_uops_0_lrs1;
r_uop_lrs2 <= io_dec_uops_0_lrs2;
r_uop_lrs3 <= io_dec_uops_0_lrs3;
r_uop_ldst_val <= io_dec_uops_0_ldst_val;
r_uop_dst_rtype <= io_dec_uops_0_dst_rtype;
r_uop_lrs1_rtype <= io_dec_uops_0_lrs1_rtype;
r_uop_lrs2_rtype <= io_dec_uops_0_lrs2_rtype;
r_uop_frs3_en <= io_dec_uops_0_frs3_en;
r_uop_fp_val <= io_dec_uops_0_fp_val;
r_uop_fp_single <= io_dec_uops_0_fp_single;
r_uop_xcpt_pf_if <= io_dec_uops_0_xcpt_pf_if;
r_uop_xcpt_ae_if <= io_dec_uops_0_xcpt_ae_if;
end
r_uop_xcpt_ma_if <= _GEN & r_uop_xcpt_ma_if;
if (_GEN) begin
end
else begin
r_uop_bp_debug_if <= io_dec_uops_0_bp_debug_if;
r_uop_bp_xcpt_if <= io_dec_uops_0_bp_xcpt_if;
r_uop_debug_fsrc <= io_dec_uops_0_debug_fsrc;
r_uop_debug_tsrc <= 2'h0;
end
end
RenameMapTable maptable (
.clock (clock),
.reset (reset),
.io_map_reqs_0_lrs1 (io_dec_uops_0_lrs1),
.io_map_reqs_0_lrs2 (io_dec_uops_0_lrs2),
.io_map_reqs_0_ldst (io_dec_uops_0_ldst),
.io_map_resps_0_prs1 (_maptable_io_map_resps_0_prs1),
.io_map_resps_0_prs2 (_maptable_io_map_resps_0_prs2),
.io_map_resps_0_stale_pdst (_maptable_io_map_resps_0_stale_pdst),
.io_remap_reqs_0_ldst (io_rollback ? io_com_uops_0_ldst : r_uop_ldst),
.io_remap_reqs_0_pdst (io_rollback ? io_com_uops_0_stale_pdst : bypassed_uop_pdst),
.io_remap_reqs_0_valid (freelist_io_reqs_0 | rbk_valids_0),
.io_ren_br_tags_0_valid (ren2_br_tags_0_valid),
.io_ren_br_tags_0_bits (r_uop_br_tag),
.io_brupdate_b2_uop_br_tag (io_brupdate_b2_uop_br_tag),
.io_brupdate_b2_mispredict (io_brupdate_b2_mispredict),
.io_rollback (io_rollback)
);
RenameFreeList freelist (
.clock (clock),
.reset (reset),
.io_reqs_0 (freelist_io_reqs_0),
.io_alloc_pregs_0_valid (_freelist_io_alloc_pregs_0_valid),
.io_alloc_pregs_0_bits (_freelist_io_alloc_pregs_0_bits),
.io_dealloc_pregs_0_valid (io_com_uops_0_ldst_val & _rbk_valids_0_T & io_com_valids_0 | rbk_valids_0),
.io_dealloc_pregs_0_bits (io_rollback ? io_com_uops_0_pdst : io_com_uops_0_stale_pdst),
.io_ren_br_tags_0_valid (ren2_br_tags_0_valid),
.io_ren_br_tags_0_bits (r_uop_br_tag),
.io_brupdate_b2_uop_br_tag (io_brupdate_b2_uop_br_tag),
.io_brupdate_b2_mispredict (io_brupdate_b2_mispredict),
.io_debug_pipeline_empty (io_debug_rob_empty)
);
RenameBusyTable busytable (
.clock (clock),
.reset (reset),
.io_ren_uops_0_pdst (bypassed_uop_pdst),
.io_ren_uops_0_prs1 (r_uop_prs1),
.io_ren_uops_0_prs2 (r_uop_prs2),
.io_busy_resps_0_prs1_busy (_busytable_io_busy_resps_0_prs1_busy),
.io_busy_resps_0_prs2_busy (_busytable_io_busy_resps_0_prs2_busy),
.io_rebusy_reqs_0 (freelist_io_reqs_0),
.io_wb_pdsts_0 (io_wakeups_0_bits_uop_pdst),
.io_wb_pdsts_1 (io_wakeups_1_bits_uop_pdst),
.io_wb_pdsts_2 (io_wakeups_2_bits_uop_pdst),
.io_wb_valids_0 (io_wakeups_0_valid),
.io_wb_valids_1 (io_wakeups_1_valid),
.io_wb_valids_2 (io_wakeups_2_valid)
);
assign io_ren_stalls_0 = _io_ren_stalls_0_T & ~_freelist_io_alloc_pregs_0_valid;
assign io_ren2_mask_0 = r_valid;
assign io_ren2_uops_0_uopc = r_uop_uopc;
assign io_ren2_uops_0_inst = r_uop_inst;
assign io_ren2_uops_0_debug_inst = r_uop_debug_inst;
assign io_ren2_uops_0_is_rvc = r_uop_is_rvc;
assign io_ren2_uops_0_debug_pc = r_uop_debug_pc;
assign io_ren2_uops_0_iq_type = r_uop_iq_type;
assign io_ren2_uops_0_fu_code = r_uop_fu_code;
assign io_ren2_uops_0_ctrl_br_type = r_uop_ctrl_br_type;
assign io_ren2_uops_0_ctrl_op1_sel = r_uop_ctrl_op1_sel;
assign io_ren2_uops_0_ctrl_op2_sel = r_uop_ctrl_op2_sel;
assign io_ren2_uops_0_ctrl_imm_sel = r_uop_ctrl_imm_sel;
assign io_ren2_uops_0_ctrl_op_fcn = r_uop_ctrl_op_fcn;
assign io_ren2_uops_0_ctrl_fcn_dw = r_uop_ctrl_fcn_dw;
assign io_ren2_uops_0_ctrl_csr_cmd = r_uop_ctrl_csr_cmd;
assign io_ren2_uops_0_ctrl_is_load = r_uop_ctrl_is_load;
assign io_ren2_uops_0_ctrl_is_sta = r_uop_ctrl_is_sta;
assign io_ren2_uops_0_ctrl_is_std = r_uop_ctrl_is_std;
assign io_ren2_uops_0_iw_state = r_uop_iw_state;
assign io_ren2_uops_0_iw_p1_poisoned = r_uop_iw_p1_poisoned;
assign io_ren2_uops_0_iw_p2_poisoned = r_uop_iw_p2_poisoned;
assign io_ren2_uops_0_is_br = r_uop_is_br;
assign io_ren2_uops_0_is_jalr = r_uop_is_jalr;
assign io_ren2_uops_0_is_jal = r_uop_is_jal;
assign io_ren2_uops_0_is_sfb = r_uop_is_sfb;
assign io_ren2_uops_0_br_mask = r_uop_br_mask & ~io_brupdate_b1_resolve_mask;
assign io_ren2_uops_0_br_tag = r_uop_br_tag;
assign io_ren2_uops_0_ftq_idx = r_uop_ftq_idx;
assign io_ren2_uops_0_edge_inst = r_uop_edge_inst;
assign io_ren2_uops_0_pc_lob = r_uop_pc_lob;
assign io_ren2_uops_0_taken = r_uop_taken;
assign io_ren2_uops_0_imm_packed = r_uop_imm_packed;
assign io_ren2_uops_0_csr_addr = r_uop_csr_addr;
assign io_ren2_uops_0_rxq_idx = r_uop_rxq_idx;
assign io_ren2_uops_0_pdst = bypassed_uop_pdst;
assign io_ren2_uops_0_prs1 = r_uop_prs1;
assign io_ren2_uops_0_prs2 = r_uop_prs2;
assign io_ren2_uops_0_prs1_busy = r_uop_lrs1_rtype == 2'h0 & _busytable_io_busy_resps_0_prs1_busy;
assign io_ren2_uops_0_prs2_busy = r_uop_lrs2_rtype == 2'h0 & _busytable_io_busy_resps_0_prs2_busy;
assign io_ren2_uops_0_stale_pdst = r_uop_stale_pdst;
assign io_ren2_uops_0_exception = r_uop_exception;
assign io_ren2_uops_0_exc_cause = r_uop_exc_cause;
assign io_ren2_uops_0_bypassable = r_uop_bypassable;
assign io_ren2_uops_0_mem_cmd = r_uop_mem_cmd;
assign io_ren2_uops_0_mem_size = r_uop_mem_size;
assign io_ren2_uops_0_mem_signed = r_uop_mem_signed;
assign io_ren2_uops_0_is_fence = r_uop_is_fence;
assign io_ren2_uops_0_is_fencei = r_uop_is_fencei;
assign io_ren2_uops_0_is_amo = r_uop_is_amo;
assign io_ren2_uops_0_uses_ldq = r_uop_uses_ldq;
assign io_ren2_uops_0_uses_stq = r_uop_uses_stq;
assign io_ren2_uops_0_is_sys_pc2epc = r_uop_is_sys_pc2epc;
assign io_ren2_uops_0_is_unique = r_uop_is_unique;
assign io_ren2_uops_0_flush_on_commit = r_uop_flush_on_commit;
assign io_ren2_uops_0_ldst_is_rs1 = r_uop_ldst_is_rs1;
assign io_ren2_uops_0_ldst = r_uop_ldst;
assign io_ren2_uops_0_lrs1 = r_uop_lrs1;
assign io_ren2_uops_0_lrs2 = r_uop_lrs2;
assign io_ren2_uops_0_lrs3 = r_uop_lrs3;
assign io_ren2_uops_0_ldst_val = r_uop_ldst_val;
assign io_ren2_uops_0_dst_rtype = r_uop_dst_rtype;
assign io_ren2_uops_0_lrs1_rtype = r_uop_lrs1_rtype;
assign io_ren2_uops_0_lrs2_rtype = r_uop_lrs2_rtype;
assign io_ren2_uops_0_frs3_en = r_uop_frs3_en;
assign io_ren2_uops_0_fp_val = r_uop_fp_val;
assign io_ren2_uops_0_fp_single = r_uop_fp_single;
assign io_ren2_uops_0_xcpt_pf_if = r_uop_xcpt_pf_if;
assign io_ren2_uops_0_xcpt_ae_if = r_uop_xcpt_ae_if;
assign io_ren2_uops_0_xcpt_ma_if = r_uop_xcpt_ma_if;
assign io_ren2_uops_0_bp_debug_if = r_uop_bp_debug_if;
assign io_ren2_uops_0_bp_xcpt_if = r_uop_bp_xcpt_if;
assign io_ren2_uops_0_debug_fsrc = r_uop_debug_fsrc;
assign io_ren2_uops_0_debug_tsrc = r_uop_debug_tsrc;
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
/** Splits an optional pair into a pair of options. */
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
  /** `Some((s, t))` becomes `(Some(s), Some(t))`; `None` becomes `(None, None)`. */
  def unzip = x match {
    case Some((first, second)) => (Some(first), Some(second))
    case None => (None, None)
  }
}
/** Adds set-membership tests to UInt. */
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
  /** True iff x equals any element of s (OR-reduction of equality checks). */
  def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR

  /** Varargs convenience form of the Seq overload. */
  def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}

implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {

  /** Like Vec.apply(idx), but tolerates indices of mismatched width */
  // The `| 0.U(...)` pads a too-narrow index; the extract drops excess MSBs.
  def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
/** Hardware-indexing and rotation helpers on Scala Seqs of hardware elements. */
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
  /** Dynamic (UInt) index into the Seq, lowered to a mux chain. */
  def apply(idx: UInt): T = {
    if (x.size <= 1) {
      x.head
    } else if (!isPow2(x.size)) {
      // For non-power-of-2 seqs, reflect elements to simplify decoder
      (x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
    } else {
      // Ignore MSBs of idx
      val truncIdx =
        if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
        else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
      // Mux chain: later elements take priority when their index matches.
      x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
    }
  }

  def extract(idx: UInt): T = VecInit(x).extract(idx)

  /** Concatenates element bits; element 0 lands in the low-order bits. */
  def asUInt: UInt = Cat(x.map(_.asUInt).reverse)

  /** Static left-rotation by n positions. */
  def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)

  /** Dynamic left-rotation; requires a power-of-2 length. Built as a barrel
    * shifter: one 2^i-element rotate stage per bit of the amount.
    */
  def rotate(n: UInt): Seq[T] = {
    if (x.size <= 1) {
      x
    } else {
      require(isPow2(x.size))
      val amt = n.padTo(log2Ceil(x.size))
      (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
    }
  }

  /** Static right-rotation by n positions. */
  def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)

  /** Dynamic right-rotation; requires a power-of-2 length. */
  def rotateRight(n: UInt): Seq[T] = {
    if (x.size <= 1) {
      x
    } else {
      require(isPow2(x.size))
      val amt = n.padTo(log2Ceil(x.size))
      (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
    }
  }
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
  // NOTE: `&` truncates to the shorter operand (plain zip), while `|`/`^`
  // zero-pad both sides to the longer length (padZip) — result lengths differ.
  def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
  def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
  def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
  // Shift-left prepends n false bits at the LSB end; shift-right drops n LSBs.
  def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
  def >> (n: Int): Seq[Bool] = x drop n
  def unary_~ : Seq[Bool] = x.map(!_)
  // Reductions; an empty Seq yields the identity element of each operation.
  def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
  def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
  def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)

  // Pads each operand with false up to the other's length, then zips.
  private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
  /** Passes x through while enable is high; otherwise replays the value
    * captured the last time enable was high.
    */
  def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))

  /** Recursively flattens any Data into its leaf Elements. */
  def getElements: Seq[Element] = x match {
    case e: Element => Seq(e)
    case a: Aggregate => a.getElements.flatMap(_.getElements)
  }
}

/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }

implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
  /** One-cycle-latency read whose result is held until the next enabled read. */
  def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
/** Naming-convention helpers on String. */
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
  /** converts from camel case to to underscores, also removing all spaces.
    * The leading character is lowered without a preceding underscore.
    * Uses drop(1) instead of tail so the empty string yields "" rather than
    * throwing UnsupportedOperationException.
    */
  def underscore: String = x.drop(1).foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
    case (acc, c) if c.isUpper => acc + "_" + c.toLower
    case (acc, c) if c == ' ' => acc
    case (acc, c) => acc + c
  }

  /** converts spaces or underscores to hyphens, also lowering case */
  def kebab: String = x.toLowerCase map {
    case ' ' => '-'
    case '_' => '-'
    case c => c
  }

  /** Appends "_named_<name>" for Some(name), "_with_no_name" for None. */
  def named(name: Option[String]): String = {
    x + name.map("_named_" + _ ).getOrElse("_with_no_name")
  }

  def named(name: String): String = named(Some(name))
}
// Implicit conversions: let a UInt literal stand in for a BitPat in decode
// tables, and let a WideCounter be used where its current value is expected.
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
/** Width-manipulation, shift, rotate, and modular-arithmetic helpers on UInt. */
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
  /** Sign-extends x to exactly n bits (requires x no wider than n). */
  def sextTo(n: Int): UInt = {
    require(x.getWidth <= n)
    if (x.getWidth == n) x
    else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
  }

  /** Zero-extends x to exactly n bits (requires x no wider than n). */
  def padTo(n: Int): UInt = {
    require(x.getWidth <= n)
    if (x.getWidth == n) x
    else Cat(0.U((n - x.getWidth).W), x)
  }

  // shifts left by n if n >= 0, or right by -n if n < 0
  // Implemented by always shifting left by the magnitude bits, then undoing
  // with a right shift of 2^w when the sign bit n(w) is set.
  def << (n: SInt): UInt = {
    val w = n.getWidth - 1
    require(w <= 30)

    val shifted = x << n(w-1, 0)
    Mux(n(w), shifted >> (1 << w), shifted)
  }

  // shifts right by n if n >= 0, or left by -n if n < 0
  def >> (n: SInt): UInt = {
    val w = n.getWidth - 1
    require(w <= 30)

    val shifted = x << (1 << w) >> n(w-1, 0)
    Mux(n(w), shifted, shifted >> (1 << w))
  }

  // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
  def extract(hi: Int, lo: Int): UInt = {
    require(hi >= lo-1)
    if (hi == lo-1) 0.U
    else x(hi, lo)
  }

  // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
  def extractOption(hi: Int, lo: Int): Option[UInt] = {
    require(hi >= lo-1)
    if (hi == lo-1) None
    else Some(x(hi, lo))
  }

  // like x & ~y, but first truncate or zero-extend y to x's width
  def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))

  /** Static right-rotation by n bit positions. */
  def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)

  /** Dynamic right-rotation, built as a log-depth barrel shifter. */
  def rotateRight(n: UInt): UInt = {
    if (x.getWidth <= 1) {
      x
    } else {
      val amt = n.padTo(log2Ceil(x.getWidth))
      (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
    }
  }

  /** Static left-rotation by n bit positions. */
  def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))

  /** Dynamic left-rotation, built as a log-depth barrel shifter. */
  def rotateLeft(n: UInt): UInt = {
    if (x.getWidth <= 1) {
      x
    } else {
      val amt = n.padTo(log2Ceil(x.getWidth))
      (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
    }
  }

  // compute (this + y) % n, given (this < n) and (y < n)
  def addWrap(y: UInt, n: Int): UInt = {
    val z = x +& y
    if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
  }

  // compute (this - y) % n, given (this < n) and (y < n)
  def subWrap(y: UInt, n: Int): UInt = {
    val z = x -& y
    if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
  }

  /** Splits x into width-sized slices, lowest bits first; the final slice may
    * conceptually extend past the top of x.
    */
  def grouped(width: Int): Seq[UInt] =
    (0 until x.getWidth by width).map(base => x(base + width - 1, base))

  /** True iff base <= x < bounds. */
  def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds

  /** Concatenation that tolerates a missing right operand. */
  def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)

  // Like >=, but prevents x-prop for ('x >= 0)
  def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
  /** Concatenation that tolerates a missing left operand (yields y alone). */
  def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
  /** Concatenation of two optional values; None if the left side is None. */
  def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
/** Small conveniences on Boolean. */
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
  /** 1 when true, 0 when false. */
  def toInt: Int = x match {
    case true => 1
    case false => 0
  }

  // this one's snagged from scalaz
  /** Lazily wraps z in Some when the flag is set; None otherwise. */
  def option[T](z: => T): Option[T] = if (!x) None else Some(z)
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
  // exact log2 — requires x to be a power of two
  def log2: Int = {
    require(isPow2(x))
    log2Ceil(x)
  }
}

// Conversions between one-hot-minus-one (OH1: a run of low 1s), one-hot,
// and binary encodings.
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)

// Index of the lowest set bit of a positive Int; None for x <= 0.
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits (each set bit smears upward),
// using log-depth doubling of the shift distance, capped at `cap`.
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
  val stop = min(width, cap)
  def helper(s: Int, x: UInt): UInt =
    if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
  helper(1, x)(width-1, 0)
}

// Fill 1s from high bits to low bits (each set bit smears downward).
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
  val stop = min(width, cap)
  def helper(s: Int, x: UInt): UInt =
    if (s >= stop) x else helper(s+s, x | (x >> s))
  helper(1, x)(width-1, 0)
}
/** Routes `in` through a trivial pass-through Module so synthesis/optimization
  * passes cannot fold or restructure logic across this point; the module is
  * named after the payload type for recognizability in the hierarchy.
  */
def OptimizationBarrier[T <: Data](in: T): T = {
  val barrier = Module(new Module {
    val io = IO(new Bundle {
      val x = Input(chiselTypeOf(in))
      val y = Output(chiselTypeOf(in))
    })
    io.y := io.x
    override def desiredName = s"OptimizationBarrier_${in.typeName}"
  })
  barrier.io.x := in
  barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
  * Useful for deterministic code generation
  */
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
  // LinkedHashMap preserves first-seen key order, keeping output deterministic.
  val buckets = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
  xs.foreach { elem =>
    buckets.getOrElseUpdate(f(elem), mutable.ListBuffer.empty[A]) += elem
  }
  buckets.iterator.map { case (key, vs) => (key, vs.toList) }.toList
}
/** Expands a one-element "global" setting to n copies, passes an n-element
  * heterogeneous setting through unchanged, and rejects any other size.
  */
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] =
  if (in.size == 1) List.fill(n)(in.head)
  else if (in.size == n) in
  else throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
// HeterogeneousBag moved to standalone diplomacy; these forwarders remain
// only for source compatibility.
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
} | module OptimizationBarrier_EntryData(
  input [19:0] io_x_ppn,
  input io_x_u,
  input io_x_ae,
  input io_x_sw,
  input io_x_sx,
  input io_x_sr,
  input io_x_pw,
  input io_x_pr,
  input io_x_pal,
  input io_x_paa,
  input io_x_eff,
  input io_x_c,
  output [19:0] io_y_ppn,
  output io_y_u,
  output io_y_ae,
  output io_y_sw,
  output io_y_sx,
  output io_y_sr,
  output io_y_pw,
  output io_y_pr,
  output io_y_pal,
  output io_y_paa,
  output io_y_eff,
  output io_y_c
);
  // Pure identity barrier (elaborated from the Chisel OptimizationBarrier
  // generator above): every io_y_* output is a direct copy of the matching
  // io_x_* input.
  assign io_y_ppn = io_x_ppn;
  assign io_y_u = io_x_u;
  assign io_y_ae = io_x_ae;
  assign io_y_sw = io_x_sw;
  assign io_y_sx = io_x_sx;
  assign io_y_sr = io_x_sr;
  assign io_y_pw = io_x_pw;
  assign io_y_pr = io_x_pr;
  assign io_y_pal = io_x_pal;
  assign io_y_paa = io_x_paa;
  assign io_y_eff = io_x_eff;
  assign io_y_c = io_x_c;
endmodule |
Generate the Verilog code corresponding to this Chisel code /*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
class SinkXRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
  // Address of the cache block targeted by the control (flush) request.
  val address = UInt(params.inner.bundle.addressBits.W)
}
class SinkX(params: InclusiveCacheParameters) extends Module
{
  val io = IO(new Bundle {
    val req = Decoupled(new FullRequest(params))
    val x = Flipped(Decoupled(new SinkXRequest(params)))
  })
  // Single-entry queue decouples the external control port from the internal
  // request interface.
  val x = Queue(io.x, 1)
  val (tag, set, offset) = params.parseAddress(x.bits.address)
  x.ready := io.req.ready
  io.req.valid := x.valid
  params.ccover(x.valid && !x.ready, "SINKX_STALL", "Backpressure when accepting a control message")
  // Control requests are injected with channel-A priority; opcode/param are
  // don't-cares because `control` marks this as a flush, not a memory op.
  io.req.bits.prio := VecInit(1.U(3.W).asBools) // same prio as A
  io.req.bits.control:= true.B
  io.req.bits.opcode := 0.U
  io.req.bits.param := 0.U
  io.req.bits.size := params.offsetBits.U
  // The source does not matter, because a flush command never allocates a way.
  // However, it must be a legal source, otherwise assertions might spuriously fire.
  io.req.bits.source := params.inner.client.clients.map(_.sourceId.start).min.U
  io.req.bits.offset := 0.U
  io.req.bits.set := set
  io.req.bits.tag := tag
  io.req.bits.put := 0.U
} | module SinkX(
  input clock,
  input reset,
  input io_req_ready,
  output io_req_valid,
  output [12:0] io_req_bits_tag,
  output [9:0] io_req_bits_set,
  output io_x_ready,
  input io_x_valid,
  input [31:0] io_x_bits_address
);
  wire [31:0] _x_q_io_deq_bits_address;
  // One-deep queue buffering the incoming control request.
  Queue1_SinkXRequest x_q (
    .clock (clock),
    .reset (reset),
    .io_enq_ready (io_x_ready),
    .io_enq_valid (io_x_valid),
    .io_enq_bits_address (io_x_bits_address),
    .io_deq_ready (io_req_ready),
    .io_deq_valid (io_req_valid),
    .io_deq_bits_address (_x_q_io_deq_bits_address)
  );
  // Tag/set extraction corresponds to params.parseAddress in the Chisel above.
  assign io_req_bits_tag = {_x_q_io_deq_bits_address[31], _x_q_io_deq_bits_address[27:16]};
  assign io_req_bits_set = _x_q_io_deq_bits_address[15:6];
endmodule |
Generate the Verilog code corresponding to this Chisel code package testchipip.serdes
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
trait TLFieldHelper {
  // Fields that may change on every beat of a multi-beat message.
  def getBodyFields(b: TLChannel): Seq[Data] = b match {
    case b: TLBundleA => Seq(b.mask, b.data, b.corrupt)
    case b: TLBundleB => Seq(b.mask, b.data, b.corrupt)
    case b: TLBundleC => Seq( b.data, b.corrupt)
    case b: TLBundleD => Seq( b.data, b.corrupt)
    case b: TLBundleE => Seq()
  }
  // Fields that are constant for the whole message (carried on the head beat).
  def getConstFields(b: TLChannel): Seq[Data] = b match {
    case b: TLBundleA => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo )
    case b: TLBundleB => Seq(b.opcode, b.param, b.size, b.source, b.address )
    case b: TLBundleC => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo )
    case b: TLBundleD => Seq(b.opcode, b.param, b.size, b.source, b.user, b.echo, b.sink, b.denied)
    case b: TLBundleE => Seq( b.sink )
  }
  // A serialized beat must be wide enough for the larger of the two groups.
  def minTLPayloadWidth(b: TLChannel): Int = Seq(getBodyFields(b), getConstFields(b)).map(_.map(_.getWidth).sum).max
  def minTLPayloadWidth(bs: Seq[TLChannel]): Int = bs.map(b => minTLPayloadWidth(b)).max
  def minTLPayloadWidth(b: TLBundle): Int = minTLPayloadWidth(Seq(b.a, b.b, b.c, b.d, b.e).map(_.bits))
}
class TLBeat(val beatWidth: Int) extends Bundle {
  // Packed TileLink channel fields for one serialized beat.
  val payload = UInt(beatWidth.W)
  // First beat of a message; payload then carries the constant fields.
  val head = Bool()
  // Final beat of a message.
  val tail = Bool()
}
abstract class TLChannelToBeat[T <: TLChannel](gen: => T, edge: TLEdge, nameSuffix: Option[String])(implicit val p: Parameters) extends Module with TLFieldHelper {
  // Encode the channel's short name (and an optional suffix) into the module name.
  override def desiredName = (Seq(this.getClass.getSimpleName) ++ nameSuffix ++ Seq(gen.params.shortName)).mkString("_")
  val beatWidth = minTLPayloadWidth(gen)
  val io = IO(new Bundle {
    val protocol = Flipped(Decoupled(gen))
    val beat = Decoupled(new TLBeat(beatWidth))
  })
  def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
  // convert decoupled to irrevocable
  val q = Module(new Queue(gen, 1, pipe=true, flow=true))
  q.io.enq <> io.protocol
  val protocol = q.io.deq
  // Driven by subclasses: whether this message needs a body beat after the head.
  val has_body = Wire(Bool())
  val body_fields = getBodyFields(protocol.bits)
  val const_fields = getConstFields(protocol.bits)
  val head = edge.first(protocol.bits, protocol.fire)
  val tail = edge.last(protocol.bits, protocol.fire)
  val body = Cat( body_fields.filter(_.getWidth > 0).map(_.asUInt))
  val const = Cat(const_fields.filter(_.getWidth > 0).map(_.asUInt))
  // False while the const (head) beat is being sent, true during body beats.
  val is_body = RegInit(false.B)
  io.beat.valid := protocol.valid
  // Consume a protocol beat only once its last serialized beat is accepted.
  protocol.ready := io.beat.ready && (is_body || !has_body)
  io.beat.bits.head := head && !is_body
  io.beat.bits.tail := tail && (is_body || !has_body)
  io.beat.bits.payload := Mux(is_body, body, const)
  when (io.beat.fire && io.beat.bits.head) { is_body := true.B }
  when (io.beat.fire && io.beat.bits.tail) { is_body := false.B }
}
abstract class TLChannelFromBeat[T <: TLChannel](gen: => T, nameSuffix: Option[String])(implicit val p: Parameters) extends Module with TLFieldHelper {
  // Encode the channel's short name (and an optional suffix) into the module name.
  override def desiredName = (Seq(this.getClass.getSimpleName) ++ nameSuffix ++ Seq(gen.params.shortName)).mkString("_")
  val beatWidth = minTLPayloadWidth(gen)
  val io = IO(new Bundle {
    val protocol = Decoupled(gen)
    val beat = Flipped(Decoupled(new TLBeat(beatWidth)))
  })
  // Handle size = 1 gracefully (Chisel3 empty range is broken)
  def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)
  val protocol = Wire(Decoupled(gen))
  io.protocol <> protocol
  val body_fields = getBodyFields(protocol.bits)
  val const_fields = getConstFields(protocol.bits)
  // True while waiting for a head beat, whose payload carries the const fields.
  val is_const = RegInit(true.B)
  val const_reg = Reg(UInt(const_fields.map(_.getWidth).sum.W))
  // Use the incoming payload directly on the head beat; afterwards replay the
  // captured constant fields for every body beat.
  val const = Mux(io.beat.bits.head, io.beat.bits.payload, const_reg)
  // A bare head beat (head && !tail) is absorbed without presenting anything
  // on the protocol side; any tail beat is forwarded when the consumer is ready.
  io.beat.ready := (is_const && !io.beat.bits.tail) || protocol.ready
  protocol.valid := (!is_const || io.beat.bits.tail) && io.beat.valid
  // Scatter a packed word back into individual fields; fields were packed with
  // Cat, so unpack in reverse order, shifting the word down as we go.
  def assign(i: UInt, sigs: Seq[Data]) = {
    var t = i
    for (s <- sigs.reverse) {
      s := t.asTypeOf(s.cloneType)
      t = t >> s.getWidth
    }
  }
  assign(const, const_fields)
  assign(io.beat.bits.payload, body_fields)
  when (io.beat.fire && io.beat.bits.head) { is_const := false.B; const_reg := io.beat.bits.payload }
  when (io.beat.fire && io.beat.bits.tail) { is_const := true.B }
}
// Per-channel serializer/deserializer specializations. A and B carry a write
// mask, so they also need a body beat whenever the mask is not all-ones;
// C/D/E need a body beat only when carrying data.
class TLAToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleA(bundle), edgeIn, nameSuffix)(p) {
  has_body := edgeIn.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U)
}
class TLAFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleA(bundle), nameSuffix)(p) {
  // Head-only messages omit the mask body beat, so default the mask to all-ones.
  when (io.beat.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) }
}
class TLBToBeat(edgeOut: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleB(bundle), edgeOut, nameSuffix)(p) {
  has_body := edgeOut.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U)
}
class TLBFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleB(bundle), nameSuffix)(p) {
  // Head-only messages omit the mask body beat, so default the mask to all-ones.
  when (io.beat.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) }
}
class TLCToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleC(bundle), edgeIn, nameSuffix)(p) {
  has_body := edgeIn.hasData(protocol.bits)
}
class TLCFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleC(bundle), nameSuffix)(p)
class TLDToBeat(edgeOut: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleD(bundle), edgeOut, nameSuffix)(p) {
  has_body := edgeOut.hasData(protocol.bits)
}
class TLDFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleD(bundle), nameSuffix)(p)
class TLEToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleE(bundle), edgeIn, nameSuffix)(p) {
  has_body := edgeIn.hasData(protocol.bits)
}
class TLEFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleE(bundle), nameSuffix)(p) | module TLEFromBeat_serial_tl_0_a64d64s8k8z8c(
  input clock,
  input reset,
  output io_beat_ready,
  input io_beat_bits_head,
  input io_beat_bits_valid,
  input io_beat_bits_tail
);
  // The E channel has no body payload for this configuration, so after
  // elaboration only the head/tail handshake state machine survives
  // (the protocol side appears to have been optimized away).
  reg is_const;
  wire io_beat_ready_0 = is_const & ~io_beat_bits_tail;
  wire _GEN = io_beat_ready_0 & io_beat_valid;
  always @(posedge clock) begin
    if (reset)
      is_const <= 1'h1;
    else
      is_const <= _GEN & io_beat_bits_tail | ~(_GEN & io_beat_bits_head) & is_const;
  end
  assign io_beat_ready = io_beat_ready_0;
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tile
import chisel3._
import chisel3.util._
import chisel3.{DontCare, WireInit, withClock, withReset}
import chisel3.experimental.SourceInfo
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.rocket._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property
case class FPUParams(
  minFLen: Int = 32,        // narrowest supported FP width (16 enables half precision)
  fLen: Int = 64,           // widest supported FP width (64 enables double precision)
  divSqrt: Boolean = true,  // include divide/sqrt support (see decode table div/sqrt columns)
  sfmaLatency: Int = 3,     // presumably single-precision FMA latency — confirm against pipeline
  dfmaLatency: Int = 4,     // presumably double-precision FMA latency — confirm against pipeline
  fpmuLatency: Int = 2,     // latency of the FP move/compare pipe (fastpipe)
  ifpuLatency: Int = 2      // latency of the int->FP unit
)
object FPConstants
{
  // Width of the rounding-mode field (used for fcsr_rm and instruction rm).
  val RM_SZ = 3
  // Width of the IEEE exception-flags field (used for fcsr_flags).
  val FLAGS_SZ = 5
}
// Decoded FPU control signals; one column per field in the decode tables of
// FPUDecoder below. Meanings are taken from how the table rows set them.
trait HasFPUCtrlSigs {
  val ldst = Bool()           // FP load/store (FLH/FSH, FLW/FSW, FLD/FSD rows)
  val wen = Bool()            // writes the FP register file
  val ren1 = Bool()           // reads FP operand rs1
  val ren2 = Bool()           // reads FP operand rs2
  val ren3 = Bool()           // reads FP operand rs3 (set only on FMA-family rows)
  val swap12 = Bool()         // swap operands 1 and 2 (set on store rows) — confirm
  val swap23 = Bool()         // swap operands 2 and 3 — confirm
  val typeTagIn = UInt(2.W)   // operand type tag (H/S/D or I for raw int moves)
  val typeTagOut = UInt(2.W)  // result type tag
  val fromint = Bool()        // int -> FP transfer/convert (FMV_*_X, FCVT_*_W rows)
  val toint = Bool()          // FP -> int transfer/compare/classify rows
  val fastpipe = Bool()       // uses the FP->FP move/convert pipe (FSGNJ/FMIN/FCVT rows)
  val fma = Bool()            // uses the FMA unit (FADD/FSUB/FMUL/FMADD... rows)
  val div = Bool()            // uses the divider (FDIV rows)
  val sqrt = Bool()           // uses the square-root unit (FSQRT rows)
  val wflags = Bool()         // may update the IEEE exception flags
  val vec = Bool()            // vector-unit operation (VFMV_F_S row)
}
class FPUCtrlSigs extends Bundle with HasFPUCtrlSigs
class FPUDecoder(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new Bundle {
val inst = Input(Bits(32.W))
val sigs = Output(new FPUCtrlSigs())
})
private val X2 = BitPat.dontCare(2)
val default = List(X,X,X,X,X,X,X,X2,X2,X,X,X,X,X,X,X,N)
val h: Array[(BitPat, List[BitPat])] =
Array(FLH -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSH -> List(Y,N,N,Y,N,Y,X, I, H,N,Y,N,N,N,N,N,N),
FMV_H_X -> List(N,Y,N,N,N,X,X, H, I,Y,N,N,N,N,N,N,N),
FCVT_H_W -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_WU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_L -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_LU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FMV_X_H -> List(N,N,Y,N,N,N,X, I, H,N,Y,N,N,N,N,N,N),
FCLASS_H -> List(N,N,Y,N,N,N,X, H, H,N,Y,N,N,N,N,N,N),
FCVT_W_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_S_H -> List(N,Y,Y,N,N,N,X, H, S,N,N,Y,N,N,N,Y,N),
FCVT_H_S -> List(N,Y,Y,N,N,N,X, S, H,N,N,Y,N,N,N,Y,N),
FEQ_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FLT_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FLE_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FSGNJ_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FSGNJN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FSGNJX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FMIN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
FMAX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
FADD_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
FSUB_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
FMUL_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,Y,N,N,Y,N),
FMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FNMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FNMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FDIV_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,N,Y,N,Y,N),
FSQRT_H -> List(N,Y,Y,N,N,N,X, H, H,N,N,N,N,N,Y,Y,N))
val f: Array[(BitPat, List[BitPat])] =
Array(FLW -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSW -> List(Y,N,N,Y,N,Y,X, I, S,N,Y,N,N,N,N,N,N),
FMV_W_X -> List(N,Y,N,N,N,X,X, S, I,Y,N,N,N,N,N,N,N),
FCVT_S_W -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_WU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_L -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_LU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FMV_X_W -> List(N,N,Y,N,N,N,X, I, S,N,Y,N,N,N,N,N,N),
FCLASS_S -> List(N,N,Y,N,N,N,X, S, S,N,Y,N,N,N,N,N,N),
FCVT_W_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FEQ_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FLT_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FLE_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FSGNJ_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FSGNJN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FSGNJX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FMIN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
FMAX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
FADD_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
FSUB_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
FMUL_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,Y,N,N,Y,N),
FMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FNMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FNMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FDIV_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,N,Y,N,Y,N),
FSQRT_S -> List(N,Y,Y,N,N,N,X, S, S,N,N,N,N,N,Y,Y,N))
val d: Array[(BitPat, List[BitPat])] =
Array(FLD -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSD -> List(Y,N,N,Y,N,Y,X, I, D,N,Y,N,N,N,N,N,N),
FMV_D_X -> List(N,Y,N,N,N,X,X, D, I,Y,N,N,N,N,N,N,N),
FCVT_D_W -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_WU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_L -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_LU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FMV_X_D -> List(N,N,Y,N,N,N,X, I, D,N,Y,N,N,N,N,N,N),
FCLASS_D -> List(N,N,Y,N,N,N,X, D, D,N,Y,N,N,N,N,N,N),
FCVT_W_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_S_D -> List(N,Y,Y,N,N,N,X, D, S,N,N,Y,N,N,N,Y,N),
FCVT_D_S -> List(N,Y,Y,N,N,N,X, S, D,N,N,Y,N,N,N,Y,N),
FEQ_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FLT_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FLE_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FSGNJ_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FSGNJN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FSGNJX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FMIN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
FMAX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
FADD_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
FSUB_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
FMUL_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,Y,N,N,Y,N),
FMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FNMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FNMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FDIV_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,N,Y,N,Y,N),
FSQRT_D -> List(N,Y,Y,N,N,N,X, D, D,N,N,N,N,N,Y,Y,N))
val fcvt_hd: Array[(BitPat, List[BitPat])] =
Array(FCVT_H_D -> List(N,Y,Y,N,N,N,X, D, H,N,N,Y,N,N,N,Y,N),
FCVT_D_H -> List(N,Y,Y,N,N,N,X, H, D,N,N,Y,N,N,N,Y,N))
val vfmv_f_s: Array[(BitPat, List[BitPat])] =
Array(VFMV_F_S -> List(N,Y,N,N,N,N,X,X2,X2,N,N,N,N,N,N,N,Y))
val insns = ((minFLen, fLen) match {
case (32, 32) => f
case (16, 32) => h ++ f
case (32, 64) => f ++ d
case (16, 64) => h ++ f ++ d ++ fcvt_hd
case other => throw new Exception(s"minFLen = ${minFLen} & fLen = ${fLen} is an unsupported configuration")
}) ++ (if (usingVector) vfmv_f_s else Array[(BitPat, List[BitPat])]())
val decoder = DecodeLogic(io.inst, default, insns)
val s = io.sigs
val sigs = Seq(s.ldst, s.wen, s.ren1, s.ren2, s.ren3, s.swap12,
s.swap23, s.typeTagIn, s.typeTagOut, s.fromint, s.toint,
s.fastpipe, s.fma, s.div, s.sqrt, s.wflags, s.vec)
sigs zip decoder map {case(s,d) => s := d}
}
// Interface between the core pipeline and the FPU. Comments below are
// grounded in usage elsewhere in this file; hedged notes mark assumptions.
class FPUCoreIO(implicit p: Parameters) extends CoreBundle()(p) {
  val hartid = Input(UInt(hartIdLen.W))
  val time = Input(UInt(xLen.W))
  val inst = Input(Bits(32.W))                         // instruction word to decode
  val fromint_data = Input(Bits(xLen.W))               // integer operand for int->FP ops
  val fcsr_rm = Input(Bits(FPConstants.RM_SZ.W))       // dynamic rounding mode from fcsr
  val fcsr_flags = Valid(Bits(FPConstants.FLAGS_SZ.W)) // accrued exception flags toward fcsr
  val v_sew = Input(UInt(3.W))
  val store_data = Output(Bits(fLen.W))                // store data in IEEE encoding
  val toint_data = Output(Bits(xLen.W))                // FP->int result
  val ll_resp_val = Input(Bool())                      // NOTE(review): ll_* looks like a late (long-latency) writeback port — confirm
  val ll_resp_type = Input(Bits(3.W))
  val ll_resp_tag = Input(UInt(5.W))
  val ll_resp_data = Input(Bits(fLen.W))
  val valid = Input(Bool())
  val fcsr_rdy = Output(Bool())
  val nack_mem = Output(Bool())
  val illegal_rm = Output(Bool())
  val killx = Input(Bool())                            // pipeline kill signals (names suggest X/M stages)
  val killm = Input(Bool())
  val dec = Output(new FPUCtrlSigs())                  // decoded control signals for the core
  val sboard_set = Output(Bool())                      // scoreboard set/clear for long-latency writebacks
  val sboard_clr = Output(Bool())
  val sboard_clra = Output(UInt(5.W))
  val keep_clock_enabled = Input(Bool())
}
// Full FPU interface: the core-facing signals plus a coprocessor (cp_*) port.
class FPUIO(implicit p: Parameters) extends FPUCoreIO ()(p) {
  val cp_req = Flipped(Decoupled(new FPInput())) //cp doesn't pay attn to kill sigs
  val cp_resp = Decoupled(new FPResult())
}
class FPResult(implicit p: Parameters) extends CoreBundle()(p) {
  // Result value; fLen+1 bits matches the hardfloat recoded width.
  val data = Bits((fLen+1).W)
  // IEEE exception flags raised by the operation.
  val exc = Bits(FPConstants.FLAGS_SZ.W)
}
class IntToFPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
  val rm = Bits(FPConstants.RM_SZ.W)  // rounding mode
  val typ = Bits(2.W)                 // int type: bit 0 = unsigned, upper bits select width (see IntToFP)
  val in1 = Bits(xLen.W)              // raw integer operand
}
// Operand bundle for the FP execution units; in1..in3 are fLen+1 bits wide,
// matching the hardfloat recoded format.
class FPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
  val rm = Bits(FPConstants.RM_SZ.W)  // rounding mode
  val fmaCmd = Bits(2.W)              // FMA op selector — TODO confirm encoding against the FMA unit
  val typ = Bits(2.W)                 // integer type selector for conversions
  val fmt = Bits(2.W)                 // FP format selector
  val in1 = Bits((fLen+1).W)
  val in2 = Bits((fLen+1).W)
  val in3 = Bits((fLen+1).W)
}
/** Descriptor for a floating-point format in hardfloat's recoded encoding.
  * @param exp exponent field width of the IEEE encoding
  * @param sig significand width, including the hidden bit
  */
case class FType(exp: Int, sig: Int) {
  def ieeeWidth = exp + sig
  // Recoded values carry one extra exponent bit.
  def recodedWidth = ieeeWidth + 1
  // Canonical quiet NaN, IEEE encoding.
  def ieeeQNaN = ((BigInt(1) << (ieeeWidth - 1)) - (BigInt(1) << (sig - 2))).U(ieeeWidth.W)
  // Canonical quiet NaN, recoded encoding.
  def qNaN = ((BigInt(7) << (exp + sig - 3)) + (BigInt(1) << (sig - 2))).U(recodedWidth.W)
  // In the recoded format, NaN is marked by the top three exponent bits all set.
  def isNaN(x: UInt) = x(sig + exp - 1, sig + exp - 3).andR
  // Signaling NaN: NaN with the quiet bit (MSB of the fraction) clear.
  def isSNaN(x: UInt) = isNaN(x) && !x(sig - 2)
  // 10-bit one-hot classification mask of a recoded value, in RISC-V fclass
  // bit order (MSB = quiet NaN ... LSB = -infinity).
  def classify(x: UInt) = {
    val sign = x(sig + exp)
    val code = x(exp + sig - 1, exp + sig - 3)
    val codeHi = code(2, 1)
    val isSpecial = codeHi === 3.U
    val isHighSubnormalIn = x(exp + sig - 3, sig - 1) < 2.U
    val isSubnormal = code === 1.U || codeHi === 1.U && isHighSubnormalIn
    val isNormal = codeHi === 1.U && !isHighSubnormalIn || codeHi === 2.U
    val isZero = code === 0.U
    val isInf = isSpecial && !code(0)
    val isNaN = code.andR
    val isSNaN = isNaN && !x(sig-2)
    val isQNaN = isNaN && x(sig-2)
    Cat(isQNaN, isSNaN, isInf && !sign, isNormal && !sign,
        isSubnormal && !sign, isZero && !sign, isZero && sign,
        isSubnormal && sign, isNormal && sign, isInf && sign)
  }
  // convert between formats, ignoring rounding, range, NaN
  def unsafeConvert(x: UInt, to: FType) = if (this == to) x else {
    val sign = x(sig + exp)
    val fractIn = x(sig - 2, 0)
    val expIn = x(sig + exp - 1, sig - 1)
    val fractOut = fractIn << to.sig >> sig
    val expOut = {
      val expCode = expIn(exp, exp - 2)
      // Rebias the exponent for the destination format; the special codes
      // (zero/subnormal at 0, Inf/NaN at >= 6) are passed through unchanged.
      val commonCase = (expIn + (1 << to.exp).U) - (1 << exp).U
      Mux(expCode === 0.U || expCode >= 6.U, Cat(expCode, commonCase(to.exp - 3, 0)), commonCase(to.exp, 0))
    }
    Cat(sign, expOut, fractOut)
  }
  // View of the IEEE encoding as sign/exponent/significand fields.
  private def ieeeBundle = {
    val expWidth = exp
    class IEEEBundle extends Bundle {
      val sign = Bool()
      val exp = UInt(expWidth.W)
      val sig = UInt((ieeeWidth-expWidth-1).W)
    }
    new IEEEBundle
  }
  def unpackIEEE(x: UInt) = x.asTypeOf(ieeeBundle)
  // IEEE <-> recoded conversions, delegated to hardfloat.
  def recode(x: UInt) = hardfloat.recFNFromFN(exp, sig, x)
  def ieee(x: UInt) = hardfloat.fNFromRecFN(exp, sig, x)
}
object FType {
  val H = new FType(5, 11)  // binary16 (half)
  val S = new FType(8, 24)  // binary32 (single)
  val D = new FType(11, 53) // binary64 (double)
  // Ordered narrowest to widest; HasFPUParameters relies on this ordering.
  val all = List(H, S, D)
}
trait HasFPUParameters {
require(fLen == 0 || FType.all.exists(_.ieeeWidth == fLen))
val minFLen: Int
val fLen: Int
def xLen: Int
val minXLen = 32
val nIntTypes = log2Ceil(xLen/minXLen) + 1
def floatTypes = FType.all.filter(t => minFLen <= t.ieeeWidth && t.ieeeWidth <= fLen)
def minType = floatTypes.head
def maxType = floatTypes.last
def prevType(t: FType) = floatTypes(typeTag(t) - 1)
def maxExpWidth = maxType.exp
def maxSigWidth = maxType.sig
def typeTag(t: FType) = floatTypes.indexOf(t)
def typeTagWbOffset = (FType.all.indexOf(minType) + 1).U
def typeTagGroup(t: FType) = (if (floatTypes.contains(t)) typeTag(t) else typeTag(maxType)).U
// typeTag
def H = typeTagGroup(FType.H)
def S = typeTagGroup(FType.S)
def D = typeTagGroup(FType.D)
def I = typeTag(maxType).U
private def isBox(x: UInt, t: FType): Bool = x(t.sig + t.exp, t.sig + t.exp - 4).andR
private def box(x: UInt, xt: FType, y: UInt, yt: FType): UInt = {
require(xt.ieeeWidth == 2 * yt.ieeeWidth)
val swizzledNaN = Cat(
x(xt.sig + xt.exp, xt.sig + xt.exp - 3),
x(xt.sig - 2, yt.recodedWidth - 1).andR,
x(xt.sig + xt.exp - 5, xt.sig),
y(yt.recodedWidth - 2),
x(xt.sig - 2, yt.recodedWidth - 1),
y(yt.recodedWidth - 1),
y(yt.recodedWidth - 3, 0))
Mux(xt.isNaN(x), swizzledNaN, x)
}
// implement NaN unboxing for FU inputs
def unbox(x: UInt, tag: UInt, exactType: Option[FType]): UInt = {
val outType = exactType.getOrElse(maxType)
def helper(x: UInt, t: FType): Seq[(Bool, UInt)] = {
val prev =
if (t == minType) {
Seq()
} else {
val prevT = prevType(t)
val unswizzled = Cat(
x(prevT.sig + prevT.exp - 1),
x(t.sig - 1),
x(prevT.sig + prevT.exp - 2, 0))
val prev = helper(unswizzled, prevT)
val isbox = isBox(x, t)
prev.map(p => (isbox && p._1, p._2))
}
prev :+ (true.B, t.unsafeConvert(x, outType))
}
val (oks, floats) = helper(x, maxType).unzip
if (exactType.isEmpty || floatTypes.size == 1) {
Mux(oks(tag), floats(tag), maxType.qNaN)
} else {
val t = exactType.get
floats(typeTag(t)) | Mux(oks(typeTag(t)), 0.U, t.qNaN)
}
}
// make sure that the redundant bits in the NaN-boxed encoding are consistent
def consistent(x: UInt): Bool = {
def helper(x: UInt, t: FType): Bool = if (typeTag(t) == 0) true.B else {
val prevT = prevType(t)
val unswizzled = Cat(
x(prevT.sig + prevT.exp - 1),
x(t.sig - 1),
x(prevT.sig + prevT.exp - 2, 0))
val prevOK = !isBox(x, t) || helper(unswizzled, prevT)
val curOK = !t.isNaN(x) || x(t.sig + t.exp - 4) === x(t.sig - 2, prevT.recodedWidth - 1).andR
prevOK && curOK
}
helper(x, maxType)
}
// generate a NaN box from an FU result
def box(x: UInt, t: FType): UInt = {
if (t == maxType) {
x
} else {
val nt = floatTypes(typeTag(t) + 1)
val bigger = box(((BigInt(1) << nt.recodedWidth)-1).U, nt, x, t)
bigger | ((BigInt(1) << maxType.recodedWidth) - (BigInt(1) << nt.recodedWidth)).U
}
}
// generate a NaN box from an FU result
def box(x: UInt, tag: UInt): UInt = {
val opts = floatTypes.map(t => box(x, t))
opts(tag)
}
// zap bits that hardfloat thinks are don't-cares, but we do care about
def sanitizeNaN(x: UInt, t: FType): UInt = {
if (typeTag(t) == 0) {
x
} else {
val maskedNaN = x & ~((BigInt(1) << (t.sig-1)) | (BigInt(1) << (t.sig+t.exp-4))).U(t.recodedWidth.W)
Mux(t.isNaN(x), maskedNaN, x)
}
}
// implement NaN boxing and recoding for FL*/fmv.*.x
def recode(x: UInt, tag: UInt): UInt = {
def helper(x: UInt, t: FType): UInt = {
if (typeTag(t) == 0) {
t.recode(x)
} else {
val prevT = prevType(t)
box(t.recode(x), t, helper(x, prevT), prevT)
}
}
// fill MSBs of subword loads to emulate a wider load of a NaN-boxed value
val boxes = floatTypes.map(t => ((BigInt(1) << maxType.ieeeWidth) - (BigInt(1) << t.ieeeWidth)).U)
helper(boxes(tag) | x, maxType)
}
// implement NaN unboxing and un-recoding for FS*/fmv.x.*
def ieee(x: UInt, t: FType = maxType): UInt = {
if (typeTag(t) == 0) {
t.ieee(x)
} else {
val unrecoded = t.ieee(x)
val prevT = prevType(t)
val prevRecoded = Cat(
x(prevT.recodedWidth-2),
x(t.sig-1),
x(prevT.recodedWidth-3, 0))
val prevUnrecoded = ieee(prevRecoded, prevT)
Cat(unrecoded >> prevT.ieeeWidth, Mux(t.isNaN(x), prevUnrecoded, unrecoded(prevT.ieeeWidth-1, 0)))
}
}
}
// Base class giving FPU submodules access to core and FPU configuration parameters.
abstract class FPUModule(implicit val p: Parameters) extends Module with HasCoreParameters with HasFPUParameters
class FPToInt(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
class Output extends Bundle {
val in = new FPInput
val lt = Bool()
val store = Bits(fLen.W)
val toint = Bits(xLen.W)
val exc = Bits(FPConstants.FLAGS_SZ.W)
}
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new Output)
})
val in = RegEnable(io.in.bits, io.in.valid)
val valid = RegNext(io.in.valid)
val dcmp = Module(new hardfloat.CompareRecFN(maxExpWidth, maxSigWidth))
dcmp.io.a := in.in1
dcmp.io.b := in.in2
dcmp.io.signaling := !in.rm(1)
val tag = in.typeTagOut
val toint_ieee = (floatTypes.map(t => if (t == FType.H) Fill(maxType.ieeeWidth / minXLen, ieee(in.in1)(15, 0).sextTo(minXLen))
else Fill(maxType.ieeeWidth / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
val toint = WireDefault(toint_ieee)
val intType = WireDefault(in.fmt(0))
io.out.bits.store := (floatTypes.map(t => Fill(fLen / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
io.out.bits.toint := ((0 until nIntTypes).map(i => toint((minXLen << i) - 1, 0).sextTo(xLen)): Seq[UInt])(intType)
io.out.bits.exc := 0.U
when (in.rm(0)) {
val classify_out = (floatTypes.map(t => t.classify(maxType.unsafeConvert(in.in1, t))): Seq[UInt])(tag)
toint := classify_out | (toint_ieee >> minXLen << minXLen)
intType := false.B
}
when (in.wflags) { // feq/flt/fle, fcvt
toint := (~in.rm & Cat(dcmp.io.lt, dcmp.io.eq)).orR | (toint_ieee >> minXLen << minXLen)
io.out.bits.exc := dcmp.io.exceptionFlags
intType := false.B
when (!in.ren2) { // fcvt
val cvtType = in.typ.extract(log2Ceil(nIntTypes), 1)
intType := cvtType
val conv = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, xLen))
conv.io.in := in.in1
conv.io.roundingMode := in.rm
conv.io.signedOut := ~in.typ(0)
toint := conv.io.out
io.out.bits.exc := Cat(conv.io.intExceptionFlags(2, 1).orR, 0.U(3.W), conv.io.intExceptionFlags(0))
for (i <- 0 until nIntTypes-1) {
val w = minXLen << i
when (cvtType === i.U) {
val narrow = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, w))
narrow.io.in := in.in1
narrow.io.roundingMode := in.rm
narrow.io.signedOut := ~in.typ(0)
val excSign = in.in1(maxExpWidth + maxSigWidth) && !maxType.isNaN(in.in1)
val excOut = Cat(conv.io.signedOut === excSign, Fill(w-1, !excSign))
val invalid = conv.io.intExceptionFlags(2) || narrow.io.intExceptionFlags(1)
when (invalid) { toint := Cat(conv.io.out >> w, excOut) }
io.out.bits.exc := Cat(invalid, 0.U(3.W), !invalid && conv.io.intExceptionFlags(0))
}
}
}
}
io.out.valid := valid
io.out.bits.lt := dcmp.io.lt || (dcmp.io.a.asSInt < 0.S && dcmp.io.b.asSInt >= 0.S)
io.out.bits.in := in
}
// Integer -> floating-point unit (fmv.*.x and fcvt.*.w/l), pipelined over
// `latency` cycles.
class IntToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
  val io = IO(new Bundle {
    val in = Flipped(Valid(new IntToFPInput))
    val out = Valid(new FPResult)
  })
  val in = Pipe(io.in)
  val tag = in.bits.typeTagIn
  val mux = Wire(new FPResult)
  mux.exc := 0.U
  // Default path (fmv): reinterpret the raw bits as a NaN-boxed recoded value.
  mux.data := recode(in.bits.in1, tag)
  // Sign- or zero-extend the selected integer sub-word (typ bit 0 selects
  // unsignedness, the upper typ bits select the width).
  val intValue = {
    val res = WireDefault(in.bits.in1.asSInt)
    for (i <- 0 until nIntTypes-1) {
      val smallInt = in.bits.in1((minXLen << i) - 1, 0)
      when (in.bits.typ.extract(log2Ceil(nIntTypes), 1) === i.U) {
        res := Mux(in.bits.typ(0), smallInt.zext, smallInt.asSInt)
      }
    }
    res.asUInt
  }
  when (in.bits.wflags) { // fcvt
    // could be improved for RVD/RVQ with a single variable-position rounding
    // unit, rather than N fixed-position ones
    val i2fResults = for (t <- floatTypes) yield {
      val i2f = Module(new hardfloat.INToRecFN(xLen, t.exp, t.sig))
      i2f.io.signedIn := ~in.bits.typ(0)
      i2f.io.in := intValue
      i2f.io.roundingMode := in.bits.rm
      i2f.io.detectTininess := hardfloat.consts.tininess_afterRounding
      (sanitizeNaN(i2f.io.out, t), i2f.io.exceptionFlags)
    }
    val (data, exc) = i2fResults.unzip
    // Pad each narrower result up to the widest recoded width before muxing.
    val dataPadded = data.init.map(d => Cat(data.last >> d.getWidth, d)) :+ data.last
    mux.data := dataPadded(tag)
    mux.exc := exc(tag)
  }
  io.out <> Pipe(in.valid, mux, latency-1)
}
class FPToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
val lt = Input(Bool()) // from FPToInt
})
val in = Pipe(io.in)
val signNum = Mux(in.bits.rm(1), in.bits.in1 ^ in.bits.in2, Mux(in.bits.rm(0), ~in.bits.in2, in.bits.in2))
val fsgnj = Cat(signNum(fLen), in.bits.in1(fLen-1, 0))
val fsgnjMux = Wire(new FPResult)
fsgnjMux.exc := 0.U
fsgnjMux.data := fsgnj
when (in.bits.wflags) { // fmin/fmax
val isnan1 = maxType.isNaN(in.bits.in1)
val isnan2 = maxType.isNaN(in.bits.in2)
val isInvalid = maxType.isSNaN(in.bits.in1) || maxType.isSNaN(in.bits.in2)
val isNaNOut = isnan1 && isnan2
val isLHS = isnan2 || in.bits.rm(0) =/= io.lt && !isnan1
fsgnjMux.exc := isInvalid << 4
fsgnjMux.data := Mux(isNaNOut, maxType.qNaN, Mux(isLHS, in.bits.in1, in.bits.in2))
}
val inTag = in.bits.typeTagIn
val outTag = in.bits.typeTagOut
val mux = WireDefault(fsgnjMux)
for (t <- floatTypes.init) {
when (outTag === typeTag(t).U) {
mux.data := Cat(fsgnjMux.data >> t.recodedWidth, maxType.unsafeConvert(fsgnjMux.data, t))
}
}
when (in.bits.wflags && !in.bits.ren2) { // fcvt
if (floatTypes.size > 1) {
// widening conversions simply canonicalize NaN operands
val widened = Mux(maxType.isNaN(in.bits.in1), maxType.qNaN, in.bits.in1)
fsgnjMux.data := widened
fsgnjMux.exc := maxType.isSNaN(in.bits.in1) << 4
// narrowing conversions require rounding (for RVQ, this could be
// optimized to use a single variable-position rounding unit, rather
// than two fixed-position ones)
for (outType <- floatTypes.init) when (outTag === typeTag(outType).U && ((typeTag(outType) == 0).B || outTag < inTag)) {
val narrower = Module(new hardfloat.RecFNToRecFN(maxType.exp, maxType.sig, outType.exp, outType.sig))
narrower.io.in := in.bits.in1
narrower.io.roundingMode := in.bits.rm
narrower.io.detectTininess := hardfloat.consts.tininess_afterRounding
val narrowed = sanitizeNaN(narrower.io.out, outType)
mux.data := Cat(fsgnjMux.data >> narrowed.getWidth, narrowed)
mux.exc := narrower.io.exceptionFlags
}
}
}
io.out <> Pipe(in.valid, mux, latency-1)
}
class MulAddRecFNPipe(latency: Int, expWidth: Int, sigWidth: Int) extends Module
{
override def desiredName = s"MulAddRecFNPipe_l${latency}_e${expWidth}_s${sigWidth}"
require(latency<=2)
val io = IO(new Bundle {
val validin = Input(Bool())
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
val validout = Output(Bool())
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul = Module(new hardfloat.MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul = Module(new hardfloat.MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
val valid_stage0 = Wire(Bool())
val roundingMode_stage0 = Wire(UInt(3.W))
val detectTininess_stage0 = Wire(UInt(1.W))
val postmul_regs = if(latency>0) 1 else 0
mulAddRecFNToRaw_postMul.io.fromPreMul := Pipe(io.validin, mulAddRecFNToRaw_preMul.io.toPostMul, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.mulAddResult := Pipe(io.validin, mulAddResult, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.roundingMode := Pipe(io.validin, io.roundingMode, postmul_regs).bits
roundingMode_stage0 := Pipe(io.validin, io.roundingMode, postmul_regs).bits
detectTininess_stage0 := Pipe(io.validin, io.detectTininess, postmul_regs).bits
valid_stage0 := Pipe(io.validin, false.B, postmul_regs).valid
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN = Module(new hardfloat.RoundRawFNToRecFN(expWidth, sigWidth, 0))
val round_regs = if(latency==2) 1 else 0
roundRawFNToRecFN.io.invalidExc := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.invalidExc, round_regs).bits
roundRawFNToRecFN.io.in := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.rawOut, round_regs).bits
roundRawFNToRecFN.io.roundingMode := Pipe(valid_stage0, roundingMode_stage0, round_regs).bits
roundRawFNToRecFN.io.detectTininess := Pipe(valid_stage0, detectTininess_stage0, round_regs).bits
io.validout := Pipe(valid_stage0, false.B, round_regs).valid
roundRawFNToRecFN.io.infiniteExc := false.B
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
class FPUFMAPipe(val latency: Int, val t: FType)
(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
override def desiredName = s"FPUFMAPipe_l${latency}_f${t.ieeeWidth}"
require(latency>0)
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
})
val valid = RegNext(io.in.valid)
val in = Reg(new FPInput)
when (io.in.valid) {
val one = 1.U << (t.sig + t.exp - 1)
val zero = (io.in.bits.in1 ^ io.in.bits.in2) & (1.U << (t.sig + t.exp))
val cmd_fma = io.in.bits.ren3
val cmd_addsub = io.in.bits.swap23
in := io.in.bits
when (cmd_addsub) { in.in2 := one }
when (!(cmd_fma || cmd_addsub)) { in.in3 := zero }
}
val fma = Module(new MulAddRecFNPipe((latency-1) min 2, t.exp, t.sig))
fma.io.validin := valid
fma.io.op := in.fmaCmd
fma.io.roundingMode := in.rm
fma.io.detectTininess := hardfloat.consts.tininess_afterRounding
fma.io.a := in.in1
fma.io.b := in.in2
fma.io.c := in.in3
val res = Wire(new FPResult)
res.data := sanitizeNaN(fma.io.out, t)
res.exc := fma.io.exceptionFlags
io.out := Pipe(fma.io.validout, res, (latency-3) max 0)
}
class FPU(cfg: FPUParams)(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new FPUIO)
val (useClockGating, useDebugROB) = coreParams match {
case r: RocketCoreParams =>
val sz = if (r.debugROB.isDefined) r.debugROB.get.size else 1
(r.clockGate, sz < 1)
case _ => (false, false)
}
val clock_en_reg = Reg(Bool())
val clock_en = clock_en_reg || io.cp_req.valid
val gated_clock =
if (!useClockGating) clock
else ClockGate(clock, clock_en, "fpu_clock_gate")
val fp_decoder = Module(new FPUDecoder)
fp_decoder.io.inst := io.inst
val id_ctrl = WireInit(fp_decoder.io.sigs)
coreParams match { case r: RocketCoreParams => r.vector.map(v => {
val v_decode = v.decoder(p) // Only need to get ren1
v_decode.io.inst := io.inst
v_decode.io.vconfig := DontCare // core deals with this
when (v_decode.io.legal && v_decode.io.read_frs1) {
id_ctrl.ren1 := true.B
id_ctrl.swap12 := false.B
id_ctrl.toint := true.B
id_ctrl.typeTagIn := I
id_ctrl.typeTagOut := Mux(io.v_sew === 3.U, D, S)
}
when (v_decode.io.write_frd) { id_ctrl.wen := true.B }
})}
val ex_reg_valid = RegNext(io.valid, false.B)
val ex_reg_inst = RegEnable(io.inst, io.valid)
val ex_reg_ctrl = RegEnable(id_ctrl, io.valid)
val ex_ra = List.fill(3)(Reg(UInt()))
// load/vector response
val load_wb = RegNext(io.ll_resp_val)
val load_wb_typeTag = RegEnable(io.ll_resp_type(1,0) - typeTagWbOffset, io.ll_resp_val)
val load_wb_data = RegEnable(io.ll_resp_data, io.ll_resp_val)
val load_wb_tag = RegEnable(io.ll_resp_tag, io.ll_resp_val)
class FPUImpl { // entering gated-clock domain
val req_valid = ex_reg_valid || io.cp_req.valid
val ex_cp_valid = io.cp_req.fire
val mem_cp_valid = RegNext(ex_cp_valid, false.B)
val wb_cp_valid = RegNext(mem_cp_valid, false.B)
val mem_reg_valid = RegInit(false.B)
val killm = (io.killm || io.nack_mem) && !mem_cp_valid
// Kill X-stage instruction if M-stage is killed. This prevents it from
// speculatively being sent to the div-sqrt unit, which can cause priority
// inversion for two back-to-back divides, the first of which is killed.
val killx = io.killx || mem_reg_valid && killm
mem_reg_valid := ex_reg_valid && !killx || ex_cp_valid
val mem_reg_inst = RegEnable(ex_reg_inst, ex_reg_valid)
val wb_reg_valid = RegNext(mem_reg_valid && (!killm || mem_cp_valid), false.B)
val cp_ctrl = Wire(new FPUCtrlSigs)
cp_ctrl :<>= io.cp_req.bits.viewAsSupertype(new FPUCtrlSigs)
io.cp_resp.valid := false.B
io.cp_resp.bits.data := 0.U
io.cp_resp.bits.exc := DontCare
val ex_ctrl = Mux(ex_cp_valid, cp_ctrl, ex_reg_ctrl)
val mem_ctrl = RegEnable(ex_ctrl, req_valid)
val wb_ctrl = RegEnable(mem_ctrl, mem_reg_valid)
// CoreMonitorBundle to monitor fp register file writes
val frfWriteBundle = Seq.fill(2)(WireInit(new CoreMonitorBundle(xLen, fLen), DontCare))
frfWriteBundle.foreach { i =>
i.clock := clock
i.reset := reset
i.hartid := io.hartid
i.timer := io.time(31,0)
i.valid := false.B
i.wrenx := false.B
i.wrenf := false.B
i.excpt := false.B
}
// regfile
val regfile = Mem(32, Bits((fLen+1).W))
when (load_wb) {
val wdata = recode(load_wb_data, load_wb_typeTag)
regfile(load_wb_tag) := wdata
assert(consistent(wdata))
if (enableCommitLog)
printf("f%d p%d 0x%x\n", load_wb_tag, load_wb_tag + 32.U, ieee(wdata))
if (useDebugROB)
DebugROB.pushWb(clock, reset, io.hartid, load_wb, load_wb_tag + 32.U, ieee(wdata))
frfWriteBundle(0).wrdst := load_wb_tag
frfWriteBundle(0).wrenf := true.B
frfWriteBundle(0).wrdata := ieee(wdata)
}
val ex_rs = ex_ra.map(a => regfile(a))
when (io.valid) {
when (id_ctrl.ren1) {
when (!id_ctrl.swap12) { ex_ra(0) := io.inst(19,15) }
when (id_ctrl.swap12) { ex_ra(1) := io.inst(19,15) }
}
when (id_ctrl.ren2) {
when (id_ctrl.swap12) { ex_ra(0) := io.inst(24,20) }
when (id_ctrl.swap23) { ex_ra(2) := io.inst(24,20) }
when (!id_ctrl.swap12 && !id_ctrl.swap23) { ex_ra(1) := io.inst(24,20) }
}
when (id_ctrl.ren3) { ex_ra(2) := io.inst(31,27) }
}
val ex_rm = Mux(ex_reg_inst(14,12) === 7.U, io.fcsr_rm, ex_reg_inst(14,12))
def fuInput(minT: Option[FType]): FPInput = {
val req = Wire(new FPInput)
val tag = ex_ctrl.typeTagIn
req.viewAsSupertype(new Bundle with HasFPUCtrlSigs) :#= ex_ctrl.viewAsSupertype(new Bundle with HasFPUCtrlSigs)
req.rm := ex_rm
req.in1 := unbox(ex_rs(0), tag, minT)
req.in2 := unbox(ex_rs(1), tag, minT)
req.in3 := unbox(ex_rs(2), tag, minT)
req.typ := ex_reg_inst(21,20)
req.fmt := ex_reg_inst(26,25)
req.fmaCmd := ex_reg_inst(3,2) | (!ex_ctrl.ren3 && ex_reg_inst(27))
when (ex_cp_valid) {
req := io.cp_req.bits
when (io.cp_req.bits.swap12) {
req.in1 := io.cp_req.bits.in2
req.in2 := io.cp_req.bits.in1
}
when (io.cp_req.bits.swap23) {
req.in2 := io.cp_req.bits.in3
req.in3 := io.cp_req.bits.in2
}
}
req
}
val sfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.S))
sfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === S
sfma.io.in.bits := fuInput(Some(sfma.t))
val fpiu = Module(new FPToInt)
fpiu.io.in.valid := req_valid && (ex_ctrl.toint || ex_ctrl.div || ex_ctrl.sqrt || (ex_ctrl.fastpipe && ex_ctrl.wflags))
fpiu.io.in.bits := fuInput(None)
io.store_data := fpiu.io.out.bits.store
io.toint_data := fpiu.io.out.bits.toint
when(fpiu.io.out.valid && mem_cp_valid && mem_ctrl.toint){
io.cp_resp.bits.data := fpiu.io.out.bits.toint
io.cp_resp.valid := true.B
}
val ifpu = Module(new IntToFP(cfg.ifpuLatency))
ifpu.io.in.valid := req_valid && ex_ctrl.fromint
ifpu.io.in.bits := fpiu.io.in.bits
ifpu.io.in.bits.in1 := Mux(ex_cp_valid, io.cp_req.bits.in1, io.fromint_data)
val fpmu = Module(new FPToFP(cfg.fpmuLatency))
fpmu.io.in.valid := req_valid && ex_ctrl.fastpipe
fpmu.io.in.bits := fpiu.io.in.bits
fpmu.io.lt := fpiu.io.out.bits.lt
val divSqrt_wen = WireDefault(false.B)
val divSqrt_inFlight = WireDefault(false.B)
val divSqrt_waddr = Reg(UInt(5.W))
val divSqrt_cp = Reg(Bool())
val divSqrt_typeTag = Wire(UInt(log2Up(floatTypes.size).W))
val divSqrt_wdata = Wire(UInt((fLen+1).W))
val divSqrt_flags = Wire(UInt(FPConstants.FLAGS_SZ.W))
divSqrt_typeTag := DontCare
divSqrt_wdata := DontCare
divSqrt_flags := DontCare
// writeback arbitration
case class Pipe(p: Module, lat: Int, cond: (FPUCtrlSigs) => Bool, res: FPResult)
val pipes = List(
Pipe(fpmu, fpmu.latency, (c: FPUCtrlSigs) => c.fastpipe, fpmu.io.out.bits),
Pipe(ifpu, ifpu.latency, (c: FPUCtrlSigs) => c.fromint, ifpu.io.out.bits),
Pipe(sfma, sfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === S, sfma.io.out.bits)) ++
(fLen > 32).option({
val dfma = Module(new FPUFMAPipe(cfg.dfmaLatency, FType.D))
dfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === D
dfma.io.in.bits := fuInput(Some(dfma.t))
Pipe(dfma, dfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === D, dfma.io.out.bits)
}) ++
(minFLen == 16).option({
val hfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.H))
hfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === H
hfma.io.in.bits := fuInput(Some(hfma.t))
Pipe(hfma, hfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === H, hfma.io.out.bits)
})
def latencyMask(c: FPUCtrlSigs, offset: Int) = {
require(pipes.forall(_.lat >= offset))
pipes.map(p => Mux(p.cond(c), (1 << p.lat-offset).U, 0.U)).reduce(_|_)
}
def pipeid(c: FPUCtrlSigs) = pipes.zipWithIndex.map(p => Mux(p._1.cond(c), p._2.U, 0.U)).reduce(_|_)
val maxLatency = pipes.map(_.lat).max
val memLatencyMask = latencyMask(mem_ctrl, 2)
class WBInfo extends Bundle {
val rd = UInt(5.W)
val typeTag = UInt(log2Up(floatTypes.size).W)
val cp = Bool()
val pipeid = UInt(log2Ceil(pipes.size).W)
}
val wen = RegInit(0.U((maxLatency-1).W))
val wbInfo = Reg(Vec(maxLatency-1, new WBInfo))
val mem_wen = mem_reg_valid && (mem_ctrl.fma || mem_ctrl.fastpipe || mem_ctrl.fromint)
val write_port_busy = RegEnable(mem_wen && (memLatencyMask & latencyMask(ex_ctrl, 1)).orR || (wen & latencyMask(ex_ctrl, 0)).orR, req_valid)
ccover(mem_reg_valid && write_port_busy, "WB_STRUCTURAL", "structural hazard on writeback")
for (i <- 0 until maxLatency-2) {
when (wen(i+1)) { wbInfo(i) := wbInfo(i+1) }
}
wen := wen >> 1
when (mem_wen) {
when (!killm) {
wen := wen >> 1 | memLatencyMask
}
for (i <- 0 until maxLatency-1) {
when (!write_port_busy && memLatencyMask(i)) {
wbInfo(i).cp := mem_cp_valid
wbInfo(i).typeTag := mem_ctrl.typeTagOut
wbInfo(i).pipeid := pipeid(mem_ctrl)
wbInfo(i).rd := mem_reg_inst(11,7)
}
}
}
val waddr = Mux(divSqrt_wen, divSqrt_waddr, wbInfo(0).rd)
val wb_cp = Mux(divSqrt_wen, divSqrt_cp, wbInfo(0).cp)
val wtypeTag = Mux(divSqrt_wen, divSqrt_typeTag, wbInfo(0).typeTag)
val wdata = box(Mux(divSqrt_wen, divSqrt_wdata, (pipes.map(_.res.data): Seq[UInt])(wbInfo(0).pipeid)), wtypeTag)
val wexc = (pipes.map(_.res.exc): Seq[UInt])(wbInfo(0).pipeid)
when ((!wbInfo(0).cp && wen(0)) || divSqrt_wen) {
assert(consistent(wdata))
regfile(waddr) := wdata
if (enableCommitLog) {
printf("f%d p%d 0x%x\n", waddr, waddr + 32.U, ieee(wdata))
}
frfWriteBundle(1).wrdst := waddr
frfWriteBundle(1).wrenf := true.B
frfWriteBundle(1).wrdata := ieee(wdata)
}
if (useDebugROB) {
DebugROB.pushWb(clock, reset, io.hartid, (!wbInfo(0).cp && wen(0)) || divSqrt_wen, waddr + 32.U, ieee(wdata))
}
when (wb_cp && (wen(0) || divSqrt_wen)) {
io.cp_resp.bits.data := wdata
io.cp_resp.valid := true.B
}
assert(!io.cp_req.valid || pipes.forall(_.lat == pipes.head.lat).B,
s"FPU only supports coprocessor if FMA pipes have uniform latency ${pipes.map(_.lat)}")
// Avoid structural hazards and nacking of external requests
// toint responds in the MEM stage, so an incoming toint can induce a structural hazard against inflight FMAs
io.cp_req.ready := !ex_reg_valid && !(cp_ctrl.toint && wen =/= 0.U) && !divSqrt_inFlight
val wb_toint_valid = wb_reg_valid && wb_ctrl.toint
val wb_toint_exc = RegEnable(fpiu.io.out.bits.exc, mem_ctrl.toint)
io.fcsr_flags.valid := wb_toint_valid || divSqrt_wen || wen(0)
io.fcsr_flags.bits :=
Mux(wb_toint_valid, wb_toint_exc, 0.U) |
Mux(divSqrt_wen, divSqrt_flags, 0.U) |
Mux(wen(0), wexc, 0.U)
val divSqrt_write_port_busy = (mem_ctrl.div || mem_ctrl.sqrt) && wen.orR
io.fcsr_rdy := !(ex_reg_valid && ex_ctrl.wflags || mem_reg_valid && mem_ctrl.wflags || wb_reg_valid && wb_ctrl.toint || wen.orR || divSqrt_inFlight)
io.nack_mem := (write_port_busy || divSqrt_write_port_busy || divSqrt_inFlight) && !mem_cp_valid
io.dec <> id_ctrl
def useScoreboard(f: ((Pipe, Int)) => Bool) = pipes.zipWithIndex.filter(_._1.lat > 3).map(x => f(x)).fold(false.B)(_||_)
io.sboard_set := wb_reg_valid && !wb_cp_valid && RegNext(useScoreboard(_._1.cond(mem_ctrl)) || mem_ctrl.div || mem_ctrl.sqrt || mem_ctrl.vec)
io.sboard_clr := !wb_cp_valid && (divSqrt_wen || (wen(0) && useScoreboard(x => wbInfo(0).pipeid === x._2.U)))
io.sboard_clra := waddr
ccover(io.sboard_clr && load_wb, "DUAL_WRITEBACK", "load and FMA writeback on same cycle")
// we don't currently support round-max-magnitude (rm=4)
io.illegal_rm := io.inst(14,12).isOneOf(5.U, 6.U) || io.inst(14,12) === 7.U && io.fcsr_rm >= 5.U
if (cfg.divSqrt) {
val divSqrt_inValid = mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt) && !divSqrt_inFlight
val divSqrt_killed = RegNext(divSqrt_inValid && killm, true.B)
when (divSqrt_inValid) {
divSqrt_waddr := mem_reg_inst(11,7)
divSqrt_cp := mem_cp_valid
}
ccover(divSqrt_inFlight && divSqrt_killed, "DIV_KILLED", "divide killed after issued to divider")
ccover(divSqrt_inFlight && mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt), "DIV_BUSY", "divider structural hazard")
ccover(mem_reg_valid && divSqrt_write_port_busy, "DIV_WB_STRUCTURAL", "structural hazard on division writeback")
for (t <- floatTypes) {
val tag = mem_ctrl.typeTagOut
val divSqrt = withReset(divSqrt_killed) { Module(new hardfloat.DivSqrtRecFN_small(t.exp, t.sig, 0)) }
divSqrt.io.inValid := divSqrt_inValid && tag === typeTag(t).U
divSqrt.io.sqrtOp := mem_ctrl.sqrt
divSqrt.io.a := maxType.unsafeConvert(fpiu.io.out.bits.in.in1, t)
divSqrt.io.b := maxType.unsafeConvert(fpiu.io.out.bits.in.in2, t)
divSqrt.io.roundingMode := fpiu.io.out.bits.in.rm
divSqrt.io.detectTininess := hardfloat.consts.tininess_afterRounding
when (!divSqrt.io.inReady) { divSqrt_inFlight := true.B } // only 1 in flight
when (divSqrt.io.outValid_div || divSqrt.io.outValid_sqrt) {
divSqrt_wen := !divSqrt_killed
divSqrt_wdata := sanitizeNaN(divSqrt.io.out, t)
divSqrt_flags := divSqrt.io.exceptionFlags
divSqrt_typeTag := typeTag(t).U
}
}
when (divSqrt_killed) { divSqrt_inFlight := false.B }
} else {
when (id_ctrl.div || id_ctrl.sqrt) { io.illegal_rm := true.B }
}
// gate the clock
clock_en_reg := !useClockGating.B ||
io.keep_clock_enabled || // chicken bit
io.valid || // ID stage
req_valid || // EX stage
mem_reg_valid || mem_cp_valid || // MEM stage
wb_reg_valid || wb_cp_valid || // WB stage
wen.orR || divSqrt_inFlight || // post-WB stage
io.ll_resp_val // load writeback
} // leaving gated-clock domain
val fpuImpl = withClock (gated_clock) { new FPUImpl }
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"FPU_$label", "Core;;" + desc)
} | module MulAddRecFNPipe_l2_e5_s11(
input clock,
input reset,
input io_validin,
input [1:0] io_op,
input [16:0] io_a,
input [16:0] io_b,
input [16:0] io_c,
input [2:0] io_roundingMode,
output [16:0] io_out,
output [4:0] io_exceptionFlags
);
wire _mulAddRecFNToRaw_postMul_io_invalidExc;
wire _mulAddRecFNToRaw_postMul_io_rawOut_isNaN;
wire _mulAddRecFNToRaw_postMul_io_rawOut_isInf;
wire _mulAddRecFNToRaw_postMul_io_rawOut_isZero;
wire _mulAddRecFNToRaw_postMul_io_rawOut_sign;
wire [6:0] _mulAddRecFNToRaw_postMul_io_rawOut_sExp;
wire [13:0] _mulAddRecFNToRaw_postMul_io_rawOut_sig;
wire [10:0] _mulAddRecFNToRaw_preMul_io_mulAddA;
wire [10:0] _mulAddRecFNToRaw_preMul_io_mulAddB;
wire [21:0] _mulAddRecFNToRaw_preMul_io_mulAddC;
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny;
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB;
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfA;
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA;
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfB;
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroB;
wire _mulAddRecFNToRaw_preMul_io_toPostMul_signProd;
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNC;
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfC;
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroC;
wire [6:0] _mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum;
wire _mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags;
wire _mulAddRecFNToRaw_preMul_io_toPostMul_CIsDominant;
wire [3:0] _mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist;
wire [12:0] _mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC;
wire _mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC;
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isSigNaNAny;
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isNaNAOrB;
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfA;
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroA;
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfB;
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroB;
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_signProd;
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isNaNC;
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfC;
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroC;
reg [6:0] mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_sExpSum;
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_doSubMags;
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_CIsDominant;
reg [3:0] mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_CDom_CAlignDist;
reg [12:0] mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_highAlignedSigC;
reg mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_bit0AlignedSigC;
reg [22:0] mulAddRecFNToRaw_postMul_io_mulAddResult_pipe_b;
reg [2:0] mulAddRecFNToRaw_postMul_io_roundingMode_pipe_b;
reg [2:0] roundingMode_stage0_pipe_b;
reg valid_stage0_pipe_v;
reg roundRawFNToRecFN_io_invalidExc_pipe_b;
reg roundRawFNToRecFN_io_in_pipe_b_isNaN;
reg roundRawFNToRecFN_io_in_pipe_b_isInf;
reg roundRawFNToRecFN_io_in_pipe_b_isZero;
reg roundRawFNToRecFN_io_in_pipe_b_sign;
reg [6:0] roundRawFNToRecFN_io_in_pipe_b_sExp;
reg [13:0] roundRawFNToRecFN_io_in_pipe_b_sig;
reg [2:0] roundRawFNToRecFN_io_roundingMode_pipe_b;
always @(posedge clock) begin
if (io_validin) begin
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isSigNaNAny <= _mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny;
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isNaNAOrB <= _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB;
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfA <= _mulAddRecFNToRaw_preMul_io_toPostMul_isInfA;
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroA <= _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA;
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfB <= _mulAddRecFNToRaw_preMul_io_toPostMul_isInfB;
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroB <= _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroB;
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_signProd <= _mulAddRecFNToRaw_preMul_io_toPostMul_signProd;
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isNaNC <= _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNC;
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfC <= _mulAddRecFNToRaw_preMul_io_toPostMul_isInfC;
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroC <= _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroC;
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_sExpSum <= _mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum;
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_doSubMags <= _mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags;
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_CIsDominant <= _mulAddRecFNToRaw_preMul_io_toPostMul_CIsDominant;
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_CDom_CAlignDist <= _mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist;
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_highAlignedSigC <= _mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC;
mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_bit0AlignedSigC <= _mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC;
mulAddRecFNToRaw_postMul_io_mulAddResult_pipe_b <= {1'h0, {11'h0, _mulAddRecFNToRaw_preMul_io_mulAddA} * {11'h0, _mulAddRecFNToRaw_preMul_io_mulAddB}} + {1'h0, _mulAddRecFNToRaw_preMul_io_mulAddC};
mulAddRecFNToRaw_postMul_io_roundingMode_pipe_b <= io_roundingMode;
roundingMode_stage0_pipe_b <= io_roundingMode;
end
if (valid_stage0_pipe_v) begin
roundRawFNToRecFN_io_invalidExc_pipe_b <= _mulAddRecFNToRaw_postMul_io_invalidExc;
roundRawFNToRecFN_io_in_pipe_b_isNaN <= _mulAddRecFNToRaw_postMul_io_rawOut_isNaN;
roundRawFNToRecFN_io_in_pipe_b_isInf <= _mulAddRecFNToRaw_postMul_io_rawOut_isInf;
roundRawFNToRecFN_io_in_pipe_b_isZero <= _mulAddRecFNToRaw_postMul_io_rawOut_isZero;
roundRawFNToRecFN_io_in_pipe_b_sign <= _mulAddRecFNToRaw_postMul_io_rawOut_sign;
roundRawFNToRecFN_io_in_pipe_b_sExp <= _mulAddRecFNToRaw_postMul_io_rawOut_sExp;
roundRawFNToRecFN_io_in_pipe_b_sig <= _mulAddRecFNToRaw_postMul_io_rawOut_sig;
roundRawFNToRecFN_io_roundingMode_pipe_b <= roundingMode_stage0_pipe_b;
end
if (reset)
valid_stage0_pipe_v <= 1'h0;
else
valid_stage0_pipe_v <= io_validin;
end
MulAddRecFNToRaw_preMul_e5_s11 mulAddRecFNToRaw_preMul (
.io_op (io_op),
.io_a (io_a),
.io_b (io_b),
.io_c (io_c),
.io_mulAddA (_mulAddRecFNToRaw_preMul_io_mulAddA),
.io_mulAddB (_mulAddRecFNToRaw_preMul_io_mulAddB),
.io_mulAddC (_mulAddRecFNToRaw_preMul_io_mulAddC),
.io_toPostMul_isSigNaNAny (_mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny),
.io_toPostMul_isNaNAOrB (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB),
.io_toPostMul_isInfA (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfA),
.io_toPostMul_isZeroA (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA),
.io_toPostMul_isInfB (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfB),
.io_toPostMul_isZeroB (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroB),
.io_toPostMul_signProd (_mulAddRecFNToRaw_preMul_io_toPostMul_signProd),
.io_toPostMul_isNaNC (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNC),
.io_toPostMul_isInfC (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfC),
.io_toPostMul_isZeroC (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroC),
.io_toPostMul_sExpSum (_mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum),
.io_toPostMul_doSubMags (_mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags),
.io_toPostMul_CIsDominant (_mulAddRecFNToRaw_preMul_io_toPostMul_CIsDominant),
.io_toPostMul_CDom_CAlignDist (_mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist),
.io_toPostMul_highAlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC),
.io_toPostMul_bit0AlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC)
);
MulAddRecFNToRaw_postMul_e5_s11 mulAddRecFNToRaw_postMul (
.io_fromPreMul_isSigNaNAny (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isSigNaNAny),
.io_fromPreMul_isNaNAOrB (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isNaNAOrB),
.io_fromPreMul_isInfA (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfA),
.io_fromPreMul_isZeroA (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroA),
.io_fromPreMul_isInfB (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfB),
.io_fromPreMul_isZeroB (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroB),
.io_fromPreMul_signProd (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_signProd),
.io_fromPreMul_isNaNC (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isNaNC),
.io_fromPreMul_isInfC (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isInfC),
.io_fromPreMul_isZeroC (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_isZeroC),
.io_fromPreMul_sExpSum (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_sExpSum),
.io_fromPreMul_doSubMags (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_doSubMags),
.io_fromPreMul_CIsDominant (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_CIsDominant),
.io_fromPreMul_CDom_CAlignDist (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_CDom_CAlignDist),
.io_fromPreMul_highAlignedSigC (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_highAlignedSigC),
.io_fromPreMul_bit0AlignedSigC (mulAddRecFNToRaw_postMul_io_fromPreMul_pipe_b_bit0AlignedSigC),
.io_mulAddResult (mulAddRecFNToRaw_postMul_io_mulAddResult_pipe_b),
.io_roundingMode (mulAddRecFNToRaw_postMul_io_roundingMode_pipe_b),
.io_invalidExc (_mulAddRecFNToRaw_postMul_io_invalidExc),
.io_rawOut_isNaN (_mulAddRecFNToRaw_postMul_io_rawOut_isNaN),
.io_rawOut_isInf (_mulAddRecFNToRaw_postMul_io_rawOut_isInf),
.io_rawOut_isZero (_mulAddRecFNToRaw_postMul_io_rawOut_isZero),
.io_rawOut_sign (_mulAddRecFNToRaw_postMul_io_rawOut_sign),
.io_rawOut_sExp (_mulAddRecFNToRaw_postMul_io_rawOut_sExp),
.io_rawOut_sig (_mulAddRecFNToRaw_postMul_io_rawOut_sig)
);
RoundRawFNToRecFN_e5_s11 roundRawFNToRecFN (
.io_invalidExc (roundRawFNToRecFN_io_invalidExc_pipe_b),
.io_infiniteExc (1'h0),
.io_in_isNaN (roundRawFNToRecFN_io_in_pipe_b_isNaN),
.io_in_isInf (roundRawFNToRecFN_io_in_pipe_b_isInf),
.io_in_isZero (roundRawFNToRecFN_io_in_pipe_b_isZero),
.io_in_sign (roundRawFNToRecFN_io_in_pipe_b_sign),
.io_in_sExp (roundRawFNToRecFN_io_in_pipe_b_sExp),
.io_in_sig (roundRawFNToRecFN_io_in_pipe_b_sig),
.io_roundingMode (roundRawFNToRecFN_io_roundingMode_pipe_b),
.io_out (io_out),
.io_exceptionFlags (io_exceptionFlags)
);
endmodule |
Generate the Verilog code corresponding to this Chisel code package boom.v3.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import boom.v3.common._
import boom.v3.util.{BoomCoreStringPrefix}
import scala.math.min
case class BoomBTBParams(
nSets: Int = 128,
nWays: Int = 2,
offsetSz: Int = 13,
extendedNSets: Int = 128
)
class BTBBranchPredictorBank(params: BoomBTBParams = BoomBTBParams())(implicit p: Parameters) extends BranchPredictorBank()(p)
{
override val nSets = params.nSets
override val nWays = params.nWays
val tagSz = vaddrBitsExtended - log2Ceil(nSets) - log2Ceil(fetchWidth) - 1
val offsetSz = params.offsetSz
val extendedNSets = params.extendedNSets
require(isPow2(nSets))
require(isPow2(extendedNSets) || extendedNSets == 0)
require(extendedNSets <= nSets)
require(extendedNSets >= 1)
class BTBEntry extends Bundle {
val offset = SInt(offsetSz.W)
val extended = Bool()
}
val btbEntrySz = offsetSz + 1
class BTBMeta extends Bundle {
val is_br = Bool()
val tag = UInt(tagSz.W)
}
val btbMetaSz = tagSz + 1
class BTBPredictMeta extends Bundle {
val write_way = UInt(log2Ceil(nWays).W)
}
val s1_meta = Wire(new BTBPredictMeta)
val f3_meta = RegNext(RegNext(s1_meta))
io.f3_meta := f3_meta.asUInt
override val metaSz = s1_meta.asUInt.getWidth
val doing_reset = RegInit(true.B)
val reset_idx = RegInit(0.U(log2Ceil(nSets).W))
reset_idx := reset_idx + doing_reset
when (reset_idx === (nSets-1).U) { doing_reset := false.B }
val meta = Seq.fill(nWays) { SyncReadMem(nSets, Vec(bankWidth, UInt(btbMetaSz.W))) }
val btb = Seq.fill(nWays) { SyncReadMem(nSets, Vec(bankWidth, UInt(btbEntrySz.W))) }
val ebtb = SyncReadMem(extendedNSets, UInt(vaddrBitsExtended.W))
val mems = (((0 until nWays) map ({w:Int => Seq(
(f"btb_meta_way$w", nSets, bankWidth * btbMetaSz),
(f"btb_data_way$w", nSets, bankWidth * btbEntrySz))})).flatten ++ Seq(("ebtb", extendedNSets, vaddrBitsExtended)))
val s1_req_rbtb = VecInit(btb.map { b => VecInit(b.read(s0_idx , s0_valid).map(_.asTypeOf(new BTBEntry))) })
val s1_req_rmeta = VecInit(meta.map { m => VecInit(m.read(s0_idx, s0_valid).map(_.asTypeOf(new BTBMeta))) })
val s1_req_rebtb = ebtb.read(s0_idx, s0_valid)
val s1_req_tag = s1_idx >> log2Ceil(nSets)
val s1_resp = Wire(Vec(bankWidth, Valid(UInt(vaddrBitsExtended.W))))
val s1_is_br = Wire(Vec(bankWidth, Bool()))
val s1_is_jal = Wire(Vec(bankWidth, Bool()))
val s1_hit_ohs = VecInit((0 until bankWidth) map { i =>
VecInit((0 until nWays) map { w =>
s1_req_rmeta(w)(i).tag === s1_req_tag(tagSz-1,0)
})
})
val s1_hits = s1_hit_ohs.map { oh => oh.reduce(_||_) }
val s1_hit_ways = s1_hit_ohs.map { oh => PriorityEncoder(oh) }
for (w <- 0 until bankWidth) {
val entry_meta = s1_req_rmeta(s1_hit_ways(w))(w)
val entry_btb = s1_req_rbtb(s1_hit_ways(w))(w)
s1_resp(w).valid := !doing_reset && s1_valid && s1_hits(w)
s1_resp(w).bits := Mux(
entry_btb.extended,
s1_req_rebtb,
(s1_pc.asSInt + (w << 1).S + entry_btb.offset).asUInt)
s1_is_br(w) := !doing_reset && s1_resp(w).valid && entry_meta.is_br
s1_is_jal(w) := !doing_reset && s1_resp(w).valid && !entry_meta.is_br
io.resp.f2(w) := io.resp_in(0).f2(w)
io.resp.f3(w) := io.resp_in(0).f3(w)
when (RegNext(s1_hits(w))) {
io.resp.f2(w).predicted_pc := RegNext(s1_resp(w))
io.resp.f2(w).is_br := RegNext(s1_is_br(w))
io.resp.f2(w).is_jal := RegNext(s1_is_jal(w))
when (RegNext(s1_is_jal(w))) {
io.resp.f2(w).taken := true.B
}
}
when (RegNext(RegNext(s1_hits(w)))) {
io.resp.f3(w).predicted_pc := RegNext(io.resp.f2(w).predicted_pc)
io.resp.f3(w).is_br := RegNext(io.resp.f2(w).is_br)
io.resp.f3(w).is_jal := RegNext(io.resp.f2(w).is_jal)
when (RegNext(RegNext(s1_is_jal(w)))) {
io.resp.f3(w).taken := true.B
}
}
}
val alloc_way = if (nWays > 1) {
val r_metas = Cat(VecInit(s1_req_rmeta.map { w => VecInit(w.map(_.tag)) }).asUInt, s1_req_tag(tagSz-1,0))
val l = log2Ceil(nWays)
val nChunks = (r_metas.getWidth + l - 1) / l
val chunks = (0 until nChunks) map { i =>
r_metas(min((i+1)*l, r_metas.getWidth)-1, i*l)
}
chunks.reduce(_^_)
} else {
0.U
}
s1_meta.write_way := Mux(s1_hits.reduce(_||_),
PriorityEncoder(s1_hit_ohs.map(_.asUInt).reduce(_|_)),
alloc_way)
val s1_update_cfi_idx = s1_update.bits.cfi_idx.bits
val s1_update_meta = s1_update.bits.meta.asTypeOf(new BTBPredictMeta)
val max_offset_value = Cat(0.B, ~(0.U((offsetSz-1).W))).asSInt
val min_offset_value = Cat(1.B, (0.U((offsetSz-1).W))).asSInt
val new_offset_value = (s1_update.bits.target.asSInt -
(s1_update.bits.pc + (s1_update.bits.cfi_idx.bits << 1)).asSInt)
val offset_is_extended = (new_offset_value > max_offset_value ||
new_offset_value < min_offset_value)
val s1_update_wbtb_data = Wire(new BTBEntry)
s1_update_wbtb_data.extended := offset_is_extended
s1_update_wbtb_data.offset := new_offset_value
val s1_update_wbtb_mask = (UIntToOH(s1_update_cfi_idx) &
Fill(bankWidth, s1_update.bits.cfi_idx.valid && s1_update.valid && s1_update.bits.cfi_taken && s1_update.bits.is_commit_update))
val s1_update_wmeta_mask = ((s1_update_wbtb_mask | s1_update.bits.br_mask) &
(Fill(bankWidth, s1_update.valid && s1_update.bits.is_commit_update) |
(Fill(bankWidth, s1_update.valid) & s1_update.bits.btb_mispredicts)
)
)
val s1_update_wmeta_data = Wire(Vec(bankWidth, new BTBMeta))
for (w <- 0 until bankWidth) {
s1_update_wmeta_data(w).tag := Mux(s1_update.bits.btb_mispredicts(w), 0.U, s1_update_idx >> log2Ceil(nSets))
s1_update_wmeta_data(w).is_br := s1_update.bits.br_mask(w)
}
for (w <- 0 until nWays) {
when (doing_reset || s1_update_meta.write_way === w.U || (w == 0 && nWays == 1).B) {
btb(w).write(
Mux(doing_reset,
reset_idx,
s1_update_idx),
Mux(doing_reset,
VecInit(Seq.fill(bankWidth) { 0.U(btbEntrySz.W) }),
VecInit(Seq.fill(bankWidth) { s1_update_wbtb_data.asUInt })),
Mux(doing_reset,
(~(0.U(bankWidth.W))),
s1_update_wbtb_mask).asBools
)
meta(w).write(
Mux(doing_reset,
reset_idx,
s1_update_idx),
Mux(doing_reset,
VecInit(Seq.fill(bankWidth) { 0.U(btbMetaSz.W) }),
VecInit(s1_update_wmeta_data.map(_.asUInt))),
Mux(doing_reset,
(~(0.U(bankWidth.W))),
s1_update_wmeta_mask).asBools
)
}
}
when (s1_update_wbtb_mask =/= 0.U && offset_is_extended) {
ebtb.write(s1_update_idx, s1_update.bits.target)
}
} | module btb_0(
input [6:0] R0_addr,
input R0_en,
input R0_clk,
output [55:0] R0_data,
input [6:0] W0_addr,
input W0_en,
input W0_clk,
input [55:0] W0_data,
input [3:0] W0_mask
);
btb_0_ext btb_0_ext (
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data),
.W0_mask (W0_mask)
);
endmodule |
Generate the Verilog code corresponding to this Chisel code package testchipip.serdes
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config._
// Serializes an arbitrary Data value into a multi-beat stream of
// flitWidth-bit flits. Beat 0 is forwarded combinationally from io.in;
// the remaining beats are replayed from the captured `data` register.
// (Chisel asTypeOf/asUInt pack element 0 from the LSBs, so the
// least-significant flit goes out first.)
class GenericSerializer[T <: Data](t: T, flitWidth: Int) extends Module {
override def desiredName = s"GenericSerializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(t)) // value to serialize; consumed on beat 0
val out = Decoupled(new Flit(flitWidth)) // serialized flit stream
val busy = Output(Bool()) // high while any flit is pending
})
val dataBits = t.getWidth.max(flitWidth)
val dataBeats = (dataBits - 1) / flitWidth + 1 // ceil(width / flitWidth)
require(dataBeats >= 1)
val data = Reg(Vec(dataBeats, UInt(flitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
// Accept a new input only while idle (beat 0); keep driving out until done.
io.in.ready := io.out.ready && beat === 0.U
io.out.valid := io.in.valid || beat =/= 0.U
io.out.bits.flit := Mux(beat === 0.U, io.in.bits.asUInt, data(beat))
when (io.out.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) {
// Latch the full value; element 0 was already sent this cycle.
data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(flitWidth.W)))
data(0) := DontCare // unused, DCE this
}
}
io.busy := io.out.valid
}
// Reassembles a multi-beat flit stream back into a value of type t.
// The first dataBeats-1 flits are buffered in `data`; the final flit is
// combined combinationally so the output fires on the last input beat.
class GenericDeserializer[T <: Data](t: T, flitWidth: Int) extends Module {
override def desiredName = s"GenericDeserializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth))) // incoming flit stream
val out = Decoupled(t) // reassembled value, valid on last beat
val busy = Output(Bool()) // mid-message indicator
})
val dataBits = t.getWidth.max(flitWidth)
val dataBeats = (dataBits - 1) / flitWidth + 1 // ceil(width / flitWidth)
require(dataBeats >= 1)
val data = Reg(Vec(dataBeats-1, UInt(flitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
// Non-final beats are always accepted; the final beat waits for the consumer.
io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
io.out.valid := io.in.valid && beat === (dataBeats-1).U
io.out.bits := (if (dataBeats == 1) {
io.in.bits.flit.asTypeOf(t)
} else {
// Last flit supplies the MSBs; earlier flits were captured in `data`.
Cat(io.in.bits.flit, data.asUInt).asTypeOf(t)
})
when (io.in.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
if (dataBeats > 1) {
when (beat =/= (dataBeats-1).U) {
data(beat(log2Ceil(dataBeats-1)-1,0)) := io.in.bits.flit
}
}
}
io.busy := beat =/= 0.U
}
// Splits each flit into one or more narrower phits (flitWidth >= phitWidth).
// Beat 0 sends the flit's low phit straight from the input; the remaining
// phits are replayed from `data`.
class FlitToPhit(flitWidth: Int, phitWidth: Int) extends Module {
override def desiredName = s"FlitToPhit_f${flitWidth}_p${phitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Phit(phitWidth))
})
require(flitWidth >= phitWidth)
val dataBeats = (flitWidth - 1) / phitWidth + 1 // phits per flit
val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W))) // holds phits 1..N-1
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready && beat === 0.U
io.out.valid := io.in.valid || beat =/= 0.U
io.out.bits.phit := (if (dataBeats == 1) io.in.bits.flit else Mux(beat === 0.U, io.in.bits.flit, data(beat-1.U)))
when (io.out.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) {
// Capture all but the first phit (phit 0 is being sent this cycle).
data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(phitWidth.W))).tail
}
}
}
object FlitToPhit {
/** Convenience constructor: instantiate a [[FlitToPhit]] stage, attach the
* given flit stream to its input, and return the narrower phit output.
*/
def apply(flit: DecoupledIO[Flit], phitWidth: Int): DecoupledIO[Phit] = {
val stage = Module(new FlitToPhit(flit.bits.flitWidth, phitWidth))
stage.io.in <> flit
stage.io.out
}
}
// Reassembles phits back into a flit (inverse of FlitToPhit). Earlier phits
// are buffered; the final phit provides the MSBs combinationally so the flit
// fires on the last input beat.
class PhitToFlit(flitWidth: Int, phitWidth: Int) extends Module {
override def desiredName = s"PhitToFlit_p${phitWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Phit(phitWidth)))
val out = Decoupled(new Flit(flitWidth))
})
require(flitWidth >= phitWidth)
val dataBeats = (flitWidth - 1) / phitWidth + 1 // phits per flit
val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W))) // phits received so far
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
// Only the final beat is gated on the downstream consumer.
io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
io.out.valid := io.in.valid && beat === (dataBeats-1).U
io.out.bits.flit := (if (dataBeats == 1) io.in.bits.phit else Cat(io.in.bits.phit, data.asUInt))
when (io.in.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
if (dataBeats > 1) {
when (beat =/= (dataBeats-1).U) {
data(beat) := io.in.bits.phit
}
}
}
}
object PhitToFlit {
/** Wire a [[PhitToFlit]] stage onto a decoupled phit stream and return the
* reassembled flit stream.
*/
def apply(phit: DecoupledIO[Phit], flitWidth: Int): DecoupledIO[Flit] = {
val stage = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
stage.io.in <> phit
stage.io.out
}
/** Valid-only variant: the sender cannot be back-pressured, so assert the
* converter is ready whenever a phit arrives and always drain its output.
*/
def apply(phit: ValidIO[Phit], flitWidth: Int): ValidIO[Flit] = {
val stage = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
stage.io.in.valid := phit.valid
stage.io.in.bits := phit.bits
when (phit.valid) { assert(stage.io.in.ready) }
stage.io.out.ready := true.B
val result = Wire(Valid(new Flit(flitWidth)))
result.valid := stage.io.out.valid
result.bits := stage.io.out.bits
result
}
}
// Time-multiplexes several phit channels onto one link. Each message is
// prefixed with headerBeats phits carrying the source-channel index, followed
// by flitBeats payload phits from that channel.
class PhitArbiter(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
override def desiredName = s"PhitArbiter_p${phitWidth}_f${flitWidth}_n${channels}"
val io = IO(new Bundle {
val in = Flipped(Vec(channels, Decoupled(new Phit(phitWidth))))
val out = Decoupled(new Phit(phitWidth))
})
if (channels == 1) {
io.out <> io.in(0) // degenerate case: no header needed
} else {
val headerWidth = log2Ceil(channels)
val headerBeats = (headerWidth - 1) / phitWidth + 1
val flitBeats = (flitWidth - 1) / phitWidth + 1
val beats = headerBeats + flitBeats // beats per complete message
val beat = RegInit(0.U(log2Ceil(beats).W))
val chosen_reg = Reg(UInt(headerWidth.W)) // channel latched at start of message
val chosen_prio = PriorityEncoder(io.in.map(_.valid)) // lowest-index valid channel
val chosen = Mux(beat === 0.U, chosen_prio, chosen_reg)
val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
io.out.valid := VecInit(io.in.map(_.valid))(chosen)
// Header beats emit the channel index; payload beats forward the phit.
io.out.bits.phit := Mux(beat < headerBeats.U,
chosen.asTypeOf(Vec(headerBeats, UInt(phitWidth.W)))(header_idx),
VecInit(io.in.map(_.bits.phit))(chosen))
for (i <- 0 until channels) {
// Inputs are only drained during payload beats, after chosen_reg is latched.
io.in(i).ready := io.out.ready && beat >= headerBeats.U && chosen_reg === i.U
}
when (io.out.fire) {
beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) { chosen_reg := chosen_prio }
}
}
}
// Inverse of PhitArbiter: consumes headerBeats phits carrying a channel
// index, then steers the following flitBeats payload phits to that output.
class PhitDemux(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
override def desiredName = s"PhitDemux_p${phitWidth}_f${flitWidth}_n${channels}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Phit(phitWidth)))
val out = Vec(channels, Decoupled(new Phit(phitWidth)))
})
if (channels == 1) {
io.out(0) <> io.in // degenerate case: no header present
} else {
val headerWidth = log2Ceil(channels)
val headerBeats = (headerWidth - 1) / phitWidth + 1
val flitBeats = (flitWidth - 1) / phitWidth + 1
val beats = headerBeats + flitBeats // beats per complete message
val beat = RegInit(0.U(log2Ceil(beats).W))
val channel_vec = Reg(Vec(headerBeats, UInt(phitWidth.W))) // accumulated header
val channel = channel_vec.asUInt(log2Ceil(channels)-1,0) // decoded target channel
val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
// Header beats are always accepted; payload beats need the target ready.
io.in.ready := beat < headerBeats.U || VecInit(io.out.map(_.ready))(channel)
for (c <- 0 until channels) {
io.out(c).valid := io.in.valid && beat >= headerBeats.U && channel === c.U
io.out(c).bits.phit := io.in.bits.phit
}
when (io.in.fire) {
beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
when (beat < headerBeats.U) {
channel_vec(header_idx) := io.in.bits.phit
}
}
}
}
// Converts a decoupled flit stream to a credited one. `credits` counts flits
// in flight to the remote bufferSz-deep buffer; sending stalls when the
// buffer would overflow. The credit channel returns a batched credit count
// encoded as (count - 1) in the flit payload.
class DecoupledFlitToCreditedFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"DecoupledFlitToCreditedFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Flipped(Decoupled(new Flit(flitWidth))) // credit returns from receiver
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val credits = RegInit(0.U((creditWidth+1).W)) // flits outstanding at the receiver
val credit_incr = io.out.fire
val credit_decr = io.credit.fire
when (credit_incr || credit_decr) {
// Add one per flit sent; subtract (payload + 1) when a credit batch returns.
credits := credits + credit_incr - Mux(io.credit.valid, io.credit.bits.flit +& 1.U, 0.U)
}
io.out.valid := io.in.valid && credits < bufferSz.U
io.out.bits.flit := io.in.bits.flit
io.in.ready := io.out.ready && credits < bufferSz.U
io.credit.ready := true.B // credits are always absorbed immediately
}
// Receiver side of the credited link: buffers incoming flits (sender's
// credit logic guarantees space, hence the assert instead of backpressure)
// and returns accumulated credits in batches, encoded as (count - 1).
class CreditedFlitToDecoupledFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"CreditedFlitToDecoupledFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Decoupled(new Flit(flitWidth)) // credit batches back to sender
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val buffer = Module(new Queue(new Flit(flitWidth), bufferSz))
val credits = RegInit(0.U((creditWidth+1).W)) // credits accrued since last return
val credit_incr = buffer.io.deq.fire
val credit_decr = io.credit.fire
when (credit_incr || credit_decr) {
// A credit send drains the whole accumulated count; otherwise keep accruing.
credits := credit_incr + Mux(credit_decr, 0.U, credits)
}
buffer.io.enq.valid := io.in.valid
buffer.io.enq.bits := io.in.bits
io.in.ready := true.B
when (io.in.valid) { assert(buffer.io.enq.ready) } // sender must respect credits
io.out <> buffer.io.deq
io.credit.valid := credits =/= 0.U
io.credit.bits.flit := credits - 1.U // batch size minus one
} | module PhitDemux_p32_f32_n5(
// Generated 5-channel phit demux (32-bit phits, 32-bit flits). `beat` is a
// single bit: 0 = header beat (capture routing index), 1 = payload beat
// (steer the phit to the selected output).
input clock,
input reset,
output io_in_ready,
input io_in_valid,
input [31:0] io_in_bits_phit,
input io_out_0_ready,
output io_out_0_valid,
output [31:0] io_out_0_bits_phit,
input io_out_1_ready,
output io_out_1_valid,
output [31:0] io_out_1_bits_phit,
input io_out_2_ready,
output io_out_2_valid,
output [31:0] io_out_2_bits_phit,
input io_out_3_ready,
output io_out_3_valid,
output [31:0] io_out_3_bits_phit,
input io_out_4_ready,
output io_out_4_valid,
output [31:0] io_out_4_bits_phit
);
reg beat; // 0 = header beat, 1 = payload beat
reg [31:0] channel_vec_0; // latched header; low 3 bits select the channel
// Ready lookup table indexed by channel; unused indices 5-7 alias channel 0.
wire [7:0] _GEN = {{io_out_0_ready}, {io_out_0_ready}, {io_out_0_ready}, {io_out_4_ready}, {io_out_3_ready}, {io_out_2_ready}, {io_out_1_ready}, {io_out_0_ready}};
wire io_in_ready_0 = ~beat | _GEN[channel_vec_0[2:0]];
wire _GEN_0 = io_in_ready_0 & io_in_valid; // io.in fire
always @(posedge clock) begin
if (reset)
beat <= 1'h0;
else if (_GEN_0)
beat <= ~beat & beat - 1'h1; // toggles 0 -> 1 -> 0 each accepted beat
if (_GEN_0 & ~beat)
channel_vec_0 <= io_in_bits_phit; // capture header on beat 0
end
assign io_in_ready = io_in_ready_0;
assign io_out_0_valid = io_in_valid & beat & channel_vec_0[2:0] == 3'h0;
assign io_out_0_bits_phit = io_in_bits_phit;
assign io_out_1_valid = io_in_valid & beat & channel_vec_0[2:0] == 3'h1;
assign io_out_1_bits_phit = io_in_bits_phit;
assign io_out_2_valid = io_in_valid & beat & channel_vec_0[2:0] == 3'h2;
assign io_out_2_bits_phit = io_in_bits_phit;
assign io_out_3_valid = io_in_valid & beat & channel_vec_0[2:0] == 3'h3;
assign io_out_3_bits_phit = io_in_bits_phit;
assign io_out_4_valid = io_in_valid & beat & channel_vec_0[2:0] == 3'h4;
assign io_out_4_bits_phit = io_in_bits_phit;
endmodule |
Generate the Verilog code corresponding to this Chisel code // See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
// Grab-bag of implicit extension classes and hardware helper functions shared
// across rocket-chip: Seq/Vec/UInt augmentation, one-hot conversions, OR-fill
// helpers, and deterministic grouping utilities.
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
// Split an optional pair into a pair of options.
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
// Hardware set-membership test: true iff x equals any element of s.
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
// Dynamic (UInt-indexed) mux over an elaboration-time Seq.
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
// Static rotation by n element positions (left).
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
// Dynamic rotation: one 2:1 mux layer per bit of the rotate amount.
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
// Zero-extend the shorter operand so mismatched lengths are legal.
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
// Pass x through while enable is high; otherwise hold the last enabled value.
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
// Synchronous read whose result is held stable until the next enabled read.
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
// Sign-extend to exactly n bits (n must be >= current width).
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
// Zero-extend to exactly n bits (n must be >= current width).
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
// Split into width-bit slices, least-significant slice first.
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
// Convert a "one-hot minus one" thermometer value to true one-hot.
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
// Thermometer encode: set the low x bits of a width-bit value.
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
// Identity module boundary that keeps the compiler from optimizing across it.
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
// Replicate a single global setting to n entries, or pass through n entries.
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
} | module OptimizationBarrier_UInt(
// Identity pass-through for a 3-bit UInt; emitted by the Chisel
// OptimizationBarrier helper so downstream tools keep the module boundary.
input [2:0] io_x,
output [2:0] io_y
);
assign io_y = io_x;
endmodule |
Generate the Verilog code corresponding to this Chisel code package boom.v3.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import boom.v3.common._
import boom.v3.util.{BoomCoreStringPrefix, MaskLower, WrapInc}
import scala.math.min
// Per-bank response from a TAGE table lookup.
class TageResp extends Bundle {
val ctr = UInt(3.W) // 3-bit taken counter; bit 2 is used as the taken prediction
val u = UInt(2.W) // 2-bit usefulness counter {hi, lo}
}
// One tagged component table of the TAGE predictor: nRows sets of bankWidth
// entries, indexed/tagged by PC hashed with a folded global history of
// histLength bits. Includes power-on reset sweep, periodic usefulness-bit
// clearing, and a small write bypass for back-to-back updates to one set.
class TageTable(val nRows: Int, val tagSz: Int, val histLength: Int, val uBitPeriod: Int)
(implicit p: Parameters) extends BoomModule()(p)
with HasBoomFrontendParameters
{
require(histLength <= globalHistoryLength)
val nWrBypassEntries = 2
val io = IO( new Bundle {
val f1_req_valid = Input(Bool())
val f1_req_pc = Input(UInt(vaddrBitsExtended.W))
val f1_req_ghist = Input(UInt(globalHistoryLength.W))
val f3_resp = Output(Vec(bankWidth, Valid(new TageResp)))
val update_mask = Input(Vec(bankWidth, Bool()))
val update_taken = Input(Vec(bankWidth, Bool()))
val update_alloc = Input(Vec(bankWidth, Bool()))
val update_old_ctr = Input(Vec(bankWidth, UInt(3.W)))
val update_pc = Input(UInt())
val update_hist = Input(UInt())
val update_u_mask = Input(Vec(bankWidth, Bool()))
val update_u = Input(Vec(bankWidth, UInt(2.W)))
})
// XOR-fold the history down to l bits.
def compute_folded_hist(hist: UInt, l: Int) = {
val nChunks = (histLength + l - 1) / l
val hist_chunks = (0 until nChunks) map {i =>
hist(min((i+1)*l, histLength)-1, i*l)
}
hist_chunks.reduce(_^_)
}
// Derive the set index and tag from the fetch index and folded history.
def compute_tag_and_hash(unhashed_idx: UInt, hist: UInt) = {
val idx_history = compute_folded_hist(hist, log2Ceil(nRows))
val idx = (unhashed_idx ^ idx_history)(log2Ceil(nRows)-1,0)
val tag_history = compute_folded_hist(hist, tagSz)
val tag = ((unhashed_idx >> log2Ceil(nRows)) ^ tag_history)(tagSz-1,0)
(idx, tag)
}
// Saturating 3-bit counter update toward taken/not-taken.
def inc_ctr(ctr: UInt, taken: Bool): UInt = {
Mux(!taken, Mux(ctr === 0.U, 0.U, ctr - 1.U),
Mux(ctr === 7.U, 7.U, ctr + 1.U))
}
// Post-reset sweep: walk every row once, zeroing the arrays.
val doing_reset = RegInit(true.B)
val reset_idx = RegInit(0.U(log2Ceil(nRows).W))
reset_idx := reset_idx + doing_reset
when (reset_idx === (nRows-1).U) { doing_reset := false.B }
class TageEntry extends Bundle {
val valid = Bool() // TODO: Remove this valid bit
val tag = UInt(tagSz.W)
val ctr = UInt(3.W)
}
val tageEntrySz = 1 + tagSz + 3
val (s1_hashed_idx, s1_tag) = compute_tag_and_hash(fetchIdx(io.f1_req_pc), io.f1_req_ghist)
// Usefulness bits are split into separate hi/lo arrays so each half can be
// cleared independently by the periodic sweep below.
val hi_us = SyncReadMem(nRows, Vec(bankWidth, Bool()))
val lo_us = SyncReadMem(nRows, Vec(bankWidth, Bool()))
val table = SyncReadMem(nRows, Vec(bankWidth, UInt(tageEntrySz.W)))
val mems = Seq((f"tage_l$histLength", nRows, bankWidth * tageEntrySz))
val s2_tag = RegNext(s1_tag)
val s2_req_rtage = VecInit(table.read(s1_hashed_idx, io.f1_req_valid).map(_.asTypeOf(new TageEntry)))
val s2_req_rhius = hi_us.read(s1_hashed_idx, io.f1_req_valid)
val s2_req_rlous = lo_us.read(s1_hashed_idx, io.f1_req_valid)
val s2_req_rhits = VecInit(s2_req_rtage.map(e => e.valid && e.tag === s2_tag && !doing_reset))
for (w <- 0 until bankWidth) {
// This bit indicates the TAGE table matched here
io.f3_resp(w).valid := RegNext(s2_req_rhits(w))
io.f3_resp(w).bits.u := RegNext(Cat(s2_req_rhius(w), s2_req_rlous(w)))
io.f3_resp(w).bits.ctr := RegNext(s2_req_rtage(w).ctr)
}
// Free-running counter that periodically clears hi then lo usefulness bits,
// one row per uBitPeriod cycles; the top bit selects which half to clear.
val clear_u_ctr = RegInit(0.U((log2Ceil(uBitPeriod) + log2Ceil(nRows) + 1).W))
when (doing_reset) { clear_u_ctr := 1.U } .otherwise { clear_u_ctr := clear_u_ctr + 1.U }
val doing_clear_u = clear_u_ctr(log2Ceil(uBitPeriod)-1,0) === 0.U
val doing_clear_u_hi = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 1.U
val doing_clear_u_lo = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 0.U
val clear_u_idx = clear_u_ctr >> log2Ceil(uBitPeriod)
val (update_idx, update_tag) = compute_tag_and_hash(fetchIdx(io.update_pc), io.update_hist)
val update_wdata = Wire(Vec(bankWidth, new TageEntry))
// Reset sweep takes priority over predictor updates on all three arrays.
table.write(
Mux(doing_reset, reset_idx , update_idx),
Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 0.U(tageEntrySz.W) }), VecInit(update_wdata.map(_.asUInt))),
Mux(doing_reset, ~(0.U(bankWidth.W)) , io.update_mask.asUInt).asBools
)
val update_hi_wdata = Wire(Vec(bankWidth, Bool()))
hi_us.write(
Mux(doing_reset, reset_idx, Mux(doing_clear_u_hi, clear_u_idx, update_idx)),
Mux(doing_reset || doing_clear_u_hi, VecInit((0.U(bankWidth.W)).asBools), update_hi_wdata),
Mux(doing_reset || doing_clear_u_hi, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools
)
val update_lo_wdata = Wire(Vec(bankWidth, Bool()))
lo_us.write(
Mux(doing_reset, reset_idx, Mux(doing_clear_u_lo, clear_u_idx, update_idx)),
Mux(doing_reset || doing_clear_u_lo, VecInit((0.U(bankWidth.W)).asBools), update_lo_wdata),
Mux(doing_reset || doing_clear_u_lo, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools
)
// Write bypass: remembers the counters of the last few updated sets so a
// second update to the same set sees the new counters, not stale SRAM data.
val wrbypass_tags = Reg(Vec(nWrBypassEntries, UInt(tagSz.W)))
val wrbypass_idxs = Reg(Vec(nWrBypassEntries, UInt(log2Ceil(nRows).W)))
val wrbypass = Reg(Vec(nWrBypassEntries, Vec(bankWidth, UInt(3.W))))
val wrbypass_enq_idx = RegInit(0.U(log2Ceil(nWrBypassEntries).W))
val wrbypass_hits = VecInit((0 until nWrBypassEntries) map { i =>
!doing_reset &&
wrbypass_tags(i) === update_tag &&
wrbypass_idxs(i) === update_idx
})
val wrbypass_hit = wrbypass_hits.reduce(_||_)
val wrbypass_hit_idx = PriorityEncoder(wrbypass_hits)
for (w <- 0 until bankWidth) {
// On allocation seed the counter to weakly taken (4) or weakly not (3);
// otherwise bump the freshest counter we have (bypass wins over input).
update_wdata(w).ctr := Mux(io.update_alloc(w),
Mux(io.update_taken(w), 4.U,
3.U
),
Mux(wrbypass_hit, inc_ctr(wrbypass(wrbypass_hit_idx)(w), io.update_taken(w)),
inc_ctr(io.update_old_ctr(w), io.update_taken(w))
)
)
update_wdata(w).valid := true.B
update_wdata(w).tag := update_tag
update_hi_wdata(w) := io.update_u(w)(1)
update_lo_wdata(w) := io.update_u(w)(0)
}
when (io.update_mask.reduce(_||_)) {
when (wrbypass_hits.reduce(_||_)) {
wrbypass(wrbypass_hit_idx) := VecInit(update_wdata.map(_.ctr))
} .otherwise {
wrbypass (wrbypass_enq_idx) := VecInit(update_wdata.map(_.ctr))
wrbypass_tags(wrbypass_enq_idx) := update_tag
wrbypass_idxs(wrbypass_enq_idx) := update_idx
wrbypass_enq_idx := WrapInc(wrbypass_enq_idx, nWrBypassEntries)
}
}
}
// Configuration for the TAGE predictor bank: one (nSets, histLen, tagSz)
// tuple per component table, plus the usefulness-bit clearing period.
case class BoomTageParams(
// nSets, histLen, tagSz
tableInfo: Seq[Tuple3[Int, Int, Int]] = Seq(( 128, 2, 7),
( 128, 4, 7),
( 256, 8, 8),
( 256, 16, 8),
( 128, 32, 9),
( 128, 64, 9)),
uBitPeriod: Int = 2048
)
// TAGE predictor bank: instantiates one TageTable per tableInfo entry and
// combines their responses. The table with the longest matching history
// "provides" the prediction; shorter matches form the alternate. Commit-time
// updates train the provider and, on mispredicts, allocate a new entry in a
// longer-history table chosen from the metadata recorded at predict time.
class TageBranchPredictorBank(params: BoomTageParams = BoomTageParams())(implicit p: Parameters) extends BranchPredictorBank()(p)
{
val tageUBitPeriod = params.uBitPeriod
val tageNTables = params.tableInfo.size
// Prediction-time state carried through io.f3_meta and replayed at update.
class TageMeta extends Bundle
{
val provider = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
val alt_differs = Vec(bankWidth, Output(Bool()))
val provider_u = Vec(bankWidth, Output(UInt(2.W)))
val provider_ctr = Vec(bankWidth, Output(UInt(3.W)))
val allocate = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
}
val f3_meta = Wire(new TageMeta)
override val metaSz = f3_meta.asUInt.getWidth
require(metaSz <= bpdMaxMetaLength)
// Usefulness update: only counts when the alternate disagreed; reward a
// correct provider, penalize a mispredicting one (saturating at 0..3).
def inc_u(u: UInt, alt_differs: Bool, mispredict: Bool): UInt = {
Mux(!alt_differs, u,
Mux(mispredict, Mux(u === 0.U, 0.U, u - 1.U),
Mux(u === 3.U, 3.U, u + 1.U)))
}
val tt = params.tableInfo map {
case (n, l, s) => {
val t = Module(new TageTable(n, s, l, params.uBitPeriod))
t.io.f1_req_valid := RegNext(io.f0_valid)
t.io.f1_req_pc := RegNext(io.f0_pc)
t.io.f1_req_ghist := io.f1_ghist
(t, t.mems)
}
}
val tables = tt.map(_._1)
val mems = tt.map(_._2).flatten
val f3_resps = VecInit(tables.map(_.io.f3_resp))
val s1_update_meta = s1_update.bits.meta.asTypeOf(new TageMeta)
val s1_update_mispredict_mask = UIntToOH(s1_update.bits.cfi_idx.bits) &
Fill(bankWidth, s1_update.bits.cfi_mispredicted)
val s1_update_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, Bool()))))
val s1_update_u_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, UInt(1.W)))))
val s1_update_taken = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
val s1_update_old_ctr = Wire(Vec(tageNTables, Vec(bankWidth, UInt(3.W))))
val s1_update_alloc = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
val s1_update_u = Wire(Vec(tageNTables, Vec(bankWidth, UInt(2.W))))
s1_update_taken := DontCare
s1_update_old_ctr := DontCare
s1_update_alloc := DontCare
s1_update_u := DontCare
for (w <- 0 until bankWidth) {
// Scala `var`s thread a priority chain from table 0 (shortest history)
// to table N-1 (longest); later hits override earlier ones.
var altpred = io.resp_in(0).f3(w).taken
val final_altpred = WireInit(io.resp_in(0).f3(w).taken)
var provided = false.B
var provider = 0.U
io.resp.f3(w).taken := io.resp_in(0).f3(w).taken
for (i <- 0 until tageNTables) {
val hit = f3_resps(i)(w).valid
val ctr = f3_resps(i)(w).bits.ctr
when (hit) {
// A weak counter (3 or 4) defers to the alternate prediction.
io.resp.f3(w).taken := Mux(ctr === 3.U || ctr === 4.U, altpred, ctr(2))
final_altpred := altpred
}
provided = provided || hit
provider = Mux(hit, i.U, provider)
altpred = Mux(hit, f3_resps(i)(w).bits.ctr(2), altpred)
}
f3_meta.provider(w).valid := provided
f3_meta.provider(w).bits := provider
f3_meta.alt_differs(w) := final_altpred =/= io.resp.f3(w).taken
f3_meta.provider_u(w) := f3_resps(provider)(w).bits.u
f3_meta.provider_ctr(w) := f3_resps(provider)(w).bits.ctr
// Create a mask of tables which did not hit our query, and also contain useless entries
// and also uses a longer history than the provider
val allocatable_slots = (
VecInit(f3_resps.map(r => !r(w).valid && r(w).bits.u === 0.U)).asUInt &
~(MaskLower(UIntToOH(provider)) & Fill(tageNTables, provided))
)
// Randomize allocation among candidates via an LFSR to avoid pathologies.
val alloc_lfsr = random.LFSR(tageNTables max 2)
val first_entry = PriorityEncoder(allocatable_slots)
val masked_entry = PriorityEncoder(allocatable_slots & alloc_lfsr)
val alloc_entry = Mux(allocatable_slots(masked_entry),
masked_entry,
first_entry)
f3_meta.allocate(w).valid := allocatable_slots =/= 0.U
f3_meta.allocate(w).bits := alloc_entry
val update_was_taken = (s1_update.bits.cfi_idx.valid &&
(s1_update.bits.cfi_idx.bits === w.U) &&
s1_update.bits.cfi_taken)
// Commit-time training of the provider table for this branch slot.
when (s1_update.bits.br_mask(w) && s1_update.valid && s1_update.bits.is_commit_update) {
when (s1_update_meta.provider(w).valid) {
val provider = s1_update_meta.provider(w).bits
s1_update_mask(provider)(w) := true.B
s1_update_u_mask(provider)(w) := true.B
val new_u = inc_u(s1_update_meta.provider_u(w),
s1_update_meta.alt_differs(w),
s1_update_mispredict_mask(w))
s1_update_u (provider)(w) := new_u
s1_update_taken (provider)(w) := update_was_taken
s1_update_old_ctr(provider)(w) := s1_update_meta.provider_ctr(w)
s1_update_alloc (provider)(w) := false.B
}
}
}
// On a committed mispredict, allocate into the table recorded at predict
// time; if none was available, decay usefulness in the longer tables instead.
when (s1_update.valid && s1_update.bits.is_commit_update && s1_update.bits.cfi_mispredicted && s1_update.bits.cfi_idx.valid) {
val idx = s1_update.bits.cfi_idx.bits
val allocate = s1_update_meta.allocate(idx)
when (allocate.valid) {
s1_update_mask (allocate.bits)(idx) := true.B
s1_update_taken(allocate.bits)(idx) := s1_update.bits.cfi_taken
s1_update_alloc(allocate.bits)(idx) := true.B
s1_update_u_mask(allocate.bits)(idx) := true.B
s1_update_u (allocate.bits)(idx) := 0.U
} .otherwise {
val provider = s1_update_meta.provider(idx)
val decr_mask = Mux(provider.valid, ~MaskLower(UIntToOH(provider.bits)), 0.U)
for (i <- 0 until tageNTables) {
when (decr_mask(i)) {
s1_update_u_mask(i)(idx) := true.B
s1_update_u (i)(idx) := 0.U
}
}
}
}
// Register the update payloads one cycle before driving the tables.
for (i <- 0 until tageNTables) {
for (w <- 0 until bankWidth) {
tables(i).io.update_mask(w) := RegNext(s1_update_mask(i)(w))
tables(i).io.update_taken(w) := RegNext(s1_update_taken(i)(w))
tables(i).io.update_alloc(w) := RegNext(s1_update_alloc(i)(w))
tables(i).io.update_old_ctr(w) := RegNext(s1_update_old_ctr(i)(w))
tables(i).io.update_u_mask(w) := RegNext(s1_update_u_mask(i)(w))
tables(i).io.update_u(w) := RegNext(s1_update_u(i)(w))
}
tables(i).io.update_pc := RegNext(s1_update.bits.pc)
tables(i).io.update_hist := RegNext(s1_update.bits.ghist)
}
//io.f3_meta := Cat(f3_meta.asUInt, micro.io.f3_meta(micro.metaSz-1,0), base.io.f3_meta(base.metaSz-1, 0))
io.f3_meta := f3_meta.asUInt
} | module hi_us_3(
// TAGE "hi" usefulness-bit array: 128 rows x 4 banks of 1 bit. Wrapper over
// the generated macro hi_us_ext; the write enable is tied high, so writes
// are gated entirely by the per-bank W0_mask.
input [6:0] R0_addr,
input R0_en,
input R0_clk,
output [3:0] R0_data,
input [6:0] W0_addr,
input W0_clk,
input [3:0] W0_data,
input [3:0] W0_mask
);
hi_us_ext hi_us_ext (
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (1'h1), // always enabled; masking selects the written banks
.W0_clk (W0_clk),
.W0_data (W0_data),
.W0_mask (W0_mask)
);
endmodule |
Generate the Verilog code corresponding to this Chisel code //******************************************************************************
// Ported from Rocket-Chip
// See LICENSE.Berkeley and LICENSE.SiFive in Rocket-Chip for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.lsu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.tile._
import freechips.rocketchip.util._
import freechips.rocketchip.rocket._
import boom.v3.common._
import boom.v3.exu.BrUpdateInfo
import boom.v3.util.{IsKilledByBranch, GetNewBrMask, BranchKillableQueue, IsOlder, UpdateBrMask, AgePriorityEncoder, WrapInc}
class BoomDCacheReqInternal(implicit p: Parameters) extends BoomDCacheReq()(p)
with HasL1HellaCacheParameters
{
// miss info
val tag_match = Bool()
val old_meta = new L1Metadata
val way_en = UInt(nWays.W)
// Used in the MSHRs
val sdq_id = UInt(log2Ceil(cfg.nSDQ).W)
}
class BoomMSHR(implicit edge: TLEdgeOut, p: Parameters) extends BoomModule()(p)
with HasL1HellaCacheParameters
{
val io = IO(new Bundle {
val id = Input(UInt())
val req_pri_val = Input(Bool())
val req_pri_rdy = Output(Bool())
val req_sec_val = Input(Bool())
val req_sec_rdy = Output(Bool())
val clear_prefetch = Input(Bool())
val brupdate = Input(new BrUpdateInfo)
val exception = Input(Bool())
val rob_pnr_idx = Input(UInt(robAddrSz.W))
val rob_head_idx = Input(UInt(robAddrSz.W))
val req = Input(new BoomDCacheReqInternal)
val req_is_probe = Input(Bool())
val idx = Output(Valid(UInt()))
val way = Output(Valid(UInt()))
val tag = Output(Valid(UInt()))
val mem_acquire = Decoupled(new TLBundleA(edge.bundle))
val mem_grant = Flipped(Decoupled(new TLBundleD(edge.bundle)))
val mem_finish = Decoupled(new TLBundleE(edge.bundle))
val prober_state = Input(Valid(UInt(coreMaxAddrBits.W)))
val refill = Decoupled(new L1DataWriteReq)
val meta_write = Decoupled(new L1MetaWriteReq)
val meta_read = Decoupled(new L1MetaReadReq)
val meta_resp = Input(Valid(new L1Metadata))
val wb_req = Decoupled(new WritebackReq(edge.bundle))
// To inform the prefetcher when we are commiting the fetch of this line
val commit_val = Output(Bool())
val commit_addr = Output(UInt(coreMaxAddrBits.W))
val commit_coh = Output(new ClientMetadata)
// Reading from the line buffer
val lb_read = Decoupled(new LineBufferReadReq)
val lb_resp = Input(UInt(encRowBits.W))
val lb_write = Decoupled(new LineBufferWriteReq)
// Replays go through the cache pipeline again
val replay = Decoupled(new BoomDCacheReqInternal)
// Resp go straight out to the core
val resp = Decoupled(new BoomDCacheResp)
// Writeback unit tells us when it is done processing our wb
val wb_resp = Input(Bool())
val probe_rdy = Output(Bool())
})
// TODO: Optimize this. We don't want to mess with cache during speculation
// s_refill_req : Make a request for a new cache line
// s_refill_resp : Store the refill response into our buffer
// s_drain_rpq_loads : Drain out loads from the rpq
// : If miss was misspeculated, go to s_invalid
// s_wb_req : Write back the evicted cache line
// s_wb_resp : Finish writing back the evicted cache line
// s_meta_write_req : Write the metadata for new cache lne
// s_meta_write_resp :
val s_invalid :: s_refill_req :: s_refill_resp :: s_drain_rpq_loads :: s_meta_read :: s_meta_resp_1 :: s_meta_resp_2 :: s_meta_clear :: s_wb_meta_read :: s_wb_req :: s_wb_resp :: s_commit_line :: s_drain_rpq :: s_meta_write_req :: s_mem_finish_1 :: s_mem_finish_2 :: s_prefetched :: s_prefetch :: Nil = Enum(18)
val state = RegInit(s_invalid)
val req = Reg(new BoomDCacheReqInternal)
val req_idx = req.addr(untagBits-1, blockOffBits)
val req_tag = req.addr >> untagBits
val req_block_addr = (req.addr >> blockOffBits) << blockOffBits
val req_needs_wb = RegInit(false.B)
val new_coh = RegInit(ClientMetadata.onReset)
val (_, shrink_param, coh_on_clear) = req.old_meta.coh.onCacheControl(M_FLUSH)
val grow_param = new_coh.onAccess(req.uop.mem_cmd)._2
val coh_on_grant = new_coh.onGrant(req.uop.mem_cmd, io.mem_grant.bits.param)
// We only accept secondary misses if the original request had sufficient permissions
val (cmd_requires_second_acquire, is_hit_again, _, dirtier_coh, dirtier_cmd) =
new_coh.onSecondaryAccess(req.uop.mem_cmd, io.req.uop.mem_cmd)
val (_, _, refill_done, refill_address_inc) = edge.addr_inc(io.mem_grant)
val sec_rdy = (!cmd_requires_second_acquire && !io.req_is_probe &&
!state.isOneOf(s_invalid, s_meta_write_req, s_mem_finish_1, s_mem_finish_2))// Always accept secondary misses
val rpq = Module(new BranchKillableQueue(new BoomDCacheReqInternal, cfg.nRPQ, u => u.uses_ldq, false))
rpq.io.brupdate := io.brupdate
rpq.io.flush := io.exception
assert(!(state === s_invalid && !rpq.io.empty))
rpq.io.enq.valid := ((io.req_pri_val && io.req_pri_rdy) || (io.req_sec_val && io.req_sec_rdy)) && !isPrefetch(io.req.uop.mem_cmd)
rpq.io.enq.bits := io.req
rpq.io.deq.ready := false.B
val grantack = Reg(Valid(new TLBundleE(edge.bundle)))
val refill_ctr = Reg(UInt(log2Ceil(cacheDataBeats).W))
val commit_line = Reg(Bool())
val grant_had_data = Reg(Bool())
val finish_to_prefetch = Reg(Bool())
// Block probes if a tag write we started is still in the pipeline
val meta_hazard = RegInit(0.U(2.W))
when (meta_hazard =/= 0.U) { meta_hazard := meta_hazard + 1.U }
when (io.meta_write.fire) { meta_hazard := 1.U }
io.probe_rdy := (meta_hazard === 0.U && (state.isOneOf(s_invalid, s_refill_req, s_refill_resp, s_drain_rpq_loads) || (state === s_meta_read && grantack.valid)))
io.idx.valid := state =/= s_invalid
io.tag.valid := state =/= s_invalid
io.way.valid := !state.isOneOf(s_invalid, s_prefetch)
io.idx.bits := req_idx
io.tag.bits := req_tag
io.way.bits := req.way_en
io.meta_write.valid := false.B
io.meta_write.bits := DontCare
io.req_pri_rdy := false.B
io.req_sec_rdy := sec_rdy && rpq.io.enq.ready
io.mem_acquire.valid := false.B
io.mem_acquire.bits := DontCare
io.refill.valid := false.B
io.refill.bits := DontCare
io.replay.valid := false.B
io.replay.bits := DontCare
io.wb_req.valid := false.B
io.wb_req.bits := DontCare
io.resp.valid := false.B
io.resp.bits := DontCare
io.commit_val := false.B
io.commit_addr := req.addr
io.commit_coh := coh_on_grant
io.meta_read.valid := false.B
io.meta_read.bits := DontCare
io.mem_finish.valid := false.B
io.mem_finish.bits := DontCare
io.lb_write.valid := false.B
io.lb_write.bits := DontCare
io.lb_read.valid := false.B
io.lb_read.bits := DontCare
io.mem_grant.ready := false.B
when (io.req_sec_val && io.req_sec_rdy) {
req.uop.mem_cmd := dirtier_cmd
when (is_hit_again) {
new_coh := dirtier_coh
}
}
def handle_pri_req(old_state: UInt): UInt = {
val new_state = WireInit(old_state)
grantack.valid := false.B
refill_ctr := 0.U
assert(rpq.io.enq.ready)
req := io.req
val old_coh = io.req.old_meta.coh
req_needs_wb := old_coh.onCacheControl(M_FLUSH)._1 // does the line we are evicting need to be written back
when (io.req.tag_match) {
val (is_hit, _, coh_on_hit) = old_coh.onAccess(io.req.uop.mem_cmd)
when (is_hit) { // set dirty bit
assert(isWrite(io.req.uop.mem_cmd))
new_coh := coh_on_hit
new_state := s_drain_rpq
} .otherwise { // upgrade permissions
new_coh := old_coh
new_state := s_refill_req
}
} .otherwise { // refill and writeback if necessary
new_coh := ClientMetadata.onReset
new_state := s_refill_req
}
new_state
}
when (state === s_invalid) {
io.req_pri_rdy := true.B
grant_had_data := false.B
when (io.req_pri_val && io.req_pri_rdy) {
state := handle_pri_req(state)
}
} .elsewhen (state === s_refill_req) {
io.mem_acquire.valid := true.B
// TODO: Use AcquirePerm if just doing permissions acquire
io.mem_acquire.bits := edge.AcquireBlock(
fromSource = io.id,
toAddress = Cat(req_tag, req_idx) << blockOffBits,
lgSize = lgCacheBlockBytes.U,
growPermissions = grow_param)._2
when (io.mem_acquire.fire) {
state := s_refill_resp
}
} .elsewhen (state === s_refill_resp) {
when (edge.hasData(io.mem_grant.bits)) {
io.mem_grant.ready := io.lb_write.ready
io.lb_write.valid := io.mem_grant.valid
io.lb_write.bits.id := io.id
io.lb_write.bits.offset := refill_address_inc >> rowOffBits
io.lb_write.bits.data := io.mem_grant.bits.data
} .otherwise {
io.mem_grant.ready := true.B
}
when (io.mem_grant.fire) {
grant_had_data := edge.hasData(io.mem_grant.bits)
}
when (refill_done) {
grantack.valid := edge.isRequest(io.mem_grant.bits)
grantack.bits := edge.GrantAck(io.mem_grant.bits)
state := Mux(grant_had_data, s_drain_rpq_loads, s_drain_rpq)
assert(!(!grant_had_data && req_needs_wb))
commit_line := false.B
new_coh := coh_on_grant
}
} .elsewhen (state === s_drain_rpq_loads) {
val drain_load = (isRead(rpq.io.deq.bits.uop.mem_cmd) &&
!isWrite(rpq.io.deq.bits.uop.mem_cmd) &&
(rpq.io.deq.bits.uop.mem_cmd =/= M_XLR)) // LR should go through replay
// drain all loads for now
val rp_addr = Cat(req_tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0))
val word_idx = if (rowWords == 1) 0.U else rp_addr(log2Up(rowWords*coreDataBytes)-1, log2Up(wordBytes))
val data = io.lb_resp
val data_word = data >> Cat(word_idx, 0.U(log2Up(coreDataBits).W))
val loadgen = new LoadGen(rpq.io.deq.bits.uop.mem_size, rpq.io.deq.bits.uop.mem_signed,
Cat(req_tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0)),
data_word, false.B, wordBytes)
rpq.io.deq.ready := io.resp.ready && io.lb_read.ready && drain_load
io.lb_read.valid := rpq.io.deq.valid && drain_load
io.lb_read.bits.id := io.id
io.lb_read.bits.offset := rpq.io.deq.bits.addr >> rowOffBits
io.resp.valid := rpq.io.deq.valid && io.lb_read.fire && drain_load
io.resp.bits.uop := rpq.io.deq.bits.uop
io.resp.bits.data := loadgen.data
io.resp.bits.is_hella := rpq.io.deq.bits.is_hella
when (rpq.io.deq.fire) {
commit_line := true.B
}
.elsewhen (rpq.io.empty && !commit_line)
{
when (!rpq.io.enq.fire) {
state := s_mem_finish_1
finish_to_prefetch := enablePrefetching.B
}
} .elsewhen (rpq.io.empty || (rpq.io.deq.valid && !drain_load)) {
// io.commit_val is for the prefetcher. it tells the prefetcher that this line was correctly acquired
// The prefetcher should consider fetching the next line
io.commit_val := true.B
state := s_meta_read
}
} .elsewhen (state === s_meta_read) {
io.meta_read.valid := !io.prober_state.valid || !grantack.valid || (io.prober_state.bits(untagBits-1,blockOffBits) =/= req_idx)
io.meta_read.bits.idx := req_idx
io.meta_read.bits.tag := req_tag
io.meta_read.bits.way_en := req.way_en
when (io.meta_read.fire) {
state := s_meta_resp_1
}
} .elsewhen (state === s_meta_resp_1) {
state := s_meta_resp_2
} .elsewhen (state === s_meta_resp_2) {
val needs_wb = io.meta_resp.bits.coh.onCacheControl(M_FLUSH)._1
state := Mux(!io.meta_resp.valid, s_meta_read, // Prober could have nack'd this read
Mux(needs_wb, s_meta_clear, s_commit_line))
} .elsewhen (state === s_meta_clear) {
io.meta_write.valid := true.B
io.meta_write.bits.idx := req_idx
io.meta_write.bits.data.coh := coh_on_clear
io.meta_write.bits.data.tag := req_tag
io.meta_write.bits.way_en := req.way_en
when (io.meta_write.fire) {
state := s_wb_req
}
} .elsewhen (state === s_wb_req) {
io.wb_req.valid := true.B
io.wb_req.bits.tag := req.old_meta.tag
io.wb_req.bits.idx := req_idx
io.wb_req.bits.param := shrink_param
io.wb_req.bits.way_en := req.way_en
io.wb_req.bits.source := io.id
io.wb_req.bits.voluntary := true.B
when (io.wb_req.fire) {
state := s_wb_resp
}
} .elsewhen (state === s_wb_resp) {
when (io.wb_resp) {
state := s_commit_line
}
} .elsewhen (state === s_commit_line) {
io.lb_read.valid := true.B
io.lb_read.bits.id := io.id
io.lb_read.bits.offset := refill_ctr
io.refill.valid := io.lb_read.fire
io.refill.bits.addr := req_block_addr | (refill_ctr << rowOffBits)
io.refill.bits.way_en := req.way_en
io.refill.bits.wmask := ~(0.U(rowWords.W))
io.refill.bits.data := io.lb_resp
when (io.refill.fire) {
refill_ctr := refill_ctr + 1.U
when (refill_ctr === (cacheDataBeats - 1).U) {
state := s_drain_rpq
}
}
} .elsewhen (state === s_drain_rpq) {
io.replay <> rpq.io.deq
io.replay.bits.way_en := req.way_en
io.replay.bits.addr := Cat(req_tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0))
when (io.replay.fire && isWrite(rpq.io.deq.bits.uop.mem_cmd)) {
// Set dirty bit
val (is_hit, _, coh_on_hit) = new_coh.onAccess(rpq.io.deq.bits.uop.mem_cmd)
assert(is_hit, "We still don't have permissions for this store")
new_coh := coh_on_hit
}
when (rpq.io.empty && !rpq.io.enq.valid) {
state := s_meta_write_req
}
} .elsewhen (state === s_meta_write_req) {
io.meta_write.valid := true.B
io.meta_write.bits.idx := req_idx
io.meta_write.bits.data.coh := new_coh
io.meta_write.bits.data.tag := req_tag
io.meta_write.bits.way_en := req.way_en
when (io.meta_write.fire) {
state := s_mem_finish_1
finish_to_prefetch := false.B
}
} .elsewhen (state === s_mem_finish_1) {
io.mem_finish.valid := grantack.valid
io.mem_finish.bits := grantack.bits
when (io.mem_finish.fire || !grantack.valid) {
grantack.valid := false.B
state := s_mem_finish_2
}
} .elsewhen (state === s_mem_finish_2) {
state := Mux(finish_to_prefetch, s_prefetch, s_invalid)
} .elsewhen (state === s_prefetch) {
io.req_pri_rdy := true.B
when ((io.req_sec_val && !io.req_sec_rdy) || io.clear_prefetch) {
state := s_invalid
} .elsewhen (io.req_sec_val && io.req_sec_rdy) {
val (is_hit, _, coh_on_hit) = new_coh.onAccess(io.req.uop.mem_cmd)
when (is_hit) { // Proceed with refill
new_coh := coh_on_hit
state := s_meta_read
} .otherwise { // Reacquire this line
new_coh := ClientMetadata.onReset
state := s_refill_req
}
} .elsewhen (io.req_pri_val && io.req_pri_rdy) {
grant_had_data := false.B
state := handle_pri_req(state)
}
}
}
class BoomIOMSHR(id: Int)(implicit edge: TLEdgeOut, p: Parameters) extends BoomModule()(p)
with HasL1HellaCacheParameters
{
val io = IO(new Bundle {
val req = Flipped(Decoupled(new BoomDCacheReq))
val resp = Decoupled(new BoomDCacheResp)
val mem_access = Decoupled(new TLBundleA(edge.bundle))
val mem_ack = Flipped(Valid(new TLBundleD(edge.bundle)))
// We don't need brupdate in here because uncacheable operations are guaranteed non-speculative
})
def beatOffset(addr: UInt) = addr.extract(beatOffBits-1, wordOffBits)
def wordFromBeat(addr: UInt, dat: UInt) = {
val shift = Cat(beatOffset(addr), 0.U((wordOffBits+log2Ceil(wordBytes)).W))
(dat >> shift)(wordBits-1, 0)
}
val req = Reg(new BoomDCacheReq)
val grant_word = Reg(UInt(wordBits.W))
val s_idle :: s_mem_access :: s_mem_ack :: s_resp :: Nil = Enum(4)
val state = RegInit(s_idle)
io.req.ready := state === s_idle
val loadgen = new LoadGen(req.uop.mem_size, req.uop.mem_signed, req.addr, grant_word, false.B, wordBytes)
val a_source = id.U
val a_address = req.addr
val a_size = req.uop.mem_size
val a_data = Fill(beatWords, req.data)
val get = edge.Get(a_source, a_address, a_size)._2
val put = edge.Put(a_source, a_address, a_size, a_data)._2
val atomics = if (edge.manager.anySupportLogical) {
MuxLookup(req.uop.mem_cmd, (0.U).asTypeOf(new TLBundleA(edge.bundle)))(Array(
M_XA_SWAP -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.SWAP)._2,
M_XA_XOR -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.XOR) ._2,
M_XA_OR -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.OR) ._2,
M_XA_AND -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.AND) ._2,
M_XA_ADD -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.ADD)._2,
M_XA_MIN -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MIN)._2,
M_XA_MAX -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MAX)._2,
M_XA_MINU -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MINU)._2,
M_XA_MAXU -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MAXU)._2))
} else {
// If no managers support atomics, assert fail if processor asks for them
assert(state === s_idle || !isAMO(req.uop.mem_cmd))
(0.U).asTypeOf(new TLBundleA(edge.bundle))
}
assert(state === s_idle || req.uop.mem_cmd =/= M_XSC)
io.mem_access.valid := state === s_mem_access
io.mem_access.bits := Mux(isAMO(req.uop.mem_cmd), atomics, Mux(isRead(req.uop.mem_cmd), get, put))
val send_resp = isRead(req.uop.mem_cmd)
io.resp.valid := (state === s_resp) && send_resp
io.resp.bits.is_hella := req.is_hella
io.resp.bits.uop := req.uop
io.resp.bits.data := loadgen.data
when (io.req.fire) {
req := io.req.bits
state := s_mem_access
}
when (io.mem_access.fire) {
state := s_mem_ack
}
when (state === s_mem_ack && io.mem_ack.valid) {
state := s_resp
when (isRead(req.uop.mem_cmd)) {
grant_word := wordFromBeat(req.addr, io.mem_ack.bits.data)
}
}
when (state === s_resp) {
when (!send_resp || io.resp.fire) {
state := s_idle
}
}
}
class LineBufferReadReq(implicit p: Parameters) extends BoomBundle()(p)
with HasL1HellaCacheParameters
{
val id = UInt(log2Ceil(nLBEntries).W)
val offset = UInt(log2Ceil(cacheDataBeats).W)
def lb_addr = Cat(id, offset)
}
class LineBufferWriteReq(implicit p: Parameters) extends LineBufferReadReq()(p)
{
val data = UInt(encRowBits.W)
}
class LineBufferMetaWriteReq(implicit p: Parameters) extends BoomBundle()(p)
{
val id = UInt(log2Ceil(nLBEntries).W)
val coh = new ClientMetadata
val addr = UInt(coreMaxAddrBits.W)
}
class LineBufferMeta(implicit p: Parameters) extends BoomBundle()(p)
with HasL1HellaCacheParameters
{
val coh = new ClientMetadata
val addr = UInt(coreMaxAddrBits.W)
}
class BoomMSHRFile(implicit edge: TLEdgeOut, p: Parameters) extends BoomModule()(p)
with HasL1HellaCacheParameters
{
val io = IO(new Bundle {
val req = Flipped(Vec(memWidth, Decoupled(new BoomDCacheReqInternal))) // Req from s2 of DCache pipe
val req_is_probe = Input(Vec(memWidth, Bool()))
val resp = Decoupled(new BoomDCacheResp)
val secondary_miss = Output(Vec(memWidth, Bool()))
val block_hit = Output(Vec(memWidth, Bool()))
val brupdate = Input(new BrUpdateInfo)
val exception = Input(Bool())
val rob_pnr_idx = Input(UInt(robAddrSz.W))
val rob_head_idx = Input(UInt(robAddrSz.W))
val mem_acquire = Decoupled(new TLBundleA(edge.bundle))
val mem_grant = Flipped(Decoupled(new TLBundleD(edge.bundle)))
val mem_finish = Decoupled(new TLBundleE(edge.bundle))
val refill = Decoupled(new L1DataWriteReq)
val meta_write = Decoupled(new L1MetaWriteReq)
val meta_read = Decoupled(new L1MetaReadReq)
val meta_resp = Input(Valid(new L1Metadata))
val replay = Decoupled(new BoomDCacheReqInternal)
val prefetch = Decoupled(new BoomDCacheReq)
val wb_req = Decoupled(new WritebackReq(edge.bundle))
val prober_state = Input(Valid(UInt(coreMaxAddrBits.W)))
val clear_all = Input(Bool()) // Clears all uncommitted MSHRs to prepare for fence
val wb_resp = Input(Bool())
val fence_rdy = Output(Bool())
val probe_rdy = Output(Bool())
})
val req_idx = OHToUInt(io.req.map(_.valid))
val req = io.req(req_idx)
val req_is_probe = io.req_is_probe(0)
for (w <- 0 until memWidth)
io.req(w).ready := false.B
val prefetcher: DataPrefetcher = if (enablePrefetching) Module(new NLPrefetcher)
else Module(new NullPrefetcher)
io.prefetch <> prefetcher.io.prefetch
val cacheable = edge.manager.supportsAcquireBFast(req.bits.addr, lgCacheBlockBytes.U)
// --------------------
// The MSHR SDQ
val sdq_val = RegInit(0.U(cfg.nSDQ.W))
val sdq_alloc_id = PriorityEncoder(~sdq_val(cfg.nSDQ-1,0))
val sdq_rdy = !sdq_val.andR
val sdq_enq = req.fire && cacheable && isWrite(req.bits.uop.mem_cmd)
val sdq = Mem(cfg.nSDQ, UInt(coreDataBits.W))
when (sdq_enq) {
sdq(sdq_alloc_id) := req.bits.data
}
// --------------------
// The LineBuffer Data
// Holds refilling lines, prefetched lines
val lb = Mem(nLBEntries * cacheDataBeats, UInt(encRowBits.W))
val lb_read_arb = Module(new Arbiter(new LineBufferReadReq, cfg.nMSHRs))
val lb_write_arb = Module(new Arbiter(new LineBufferWriteReq, cfg.nMSHRs))
lb_read_arb.io.out.ready := false.B
lb_write_arb.io.out.ready := true.B
val lb_read_data = WireInit(0.U(encRowBits.W))
when (lb_write_arb.io.out.fire) {
lb.write(lb_write_arb.io.out.bits.lb_addr, lb_write_arb.io.out.bits.data)
} .otherwise {
lb_read_arb.io.out.ready := true.B
when (lb_read_arb.io.out.fire) {
lb_read_data := lb.read(lb_read_arb.io.out.bits.lb_addr)
}
}
def widthMap[T <: Data](f: Int => T) = VecInit((0 until memWidth).map(f))
val idx_matches = Wire(Vec(memWidth, Vec(cfg.nMSHRs, Bool())))
val tag_matches = Wire(Vec(memWidth, Vec(cfg.nMSHRs, Bool())))
val way_matches = Wire(Vec(memWidth, Vec(cfg.nMSHRs, Bool())))
val tag_match = widthMap(w => Mux1H(idx_matches(w), tag_matches(w)))
val idx_match = widthMap(w => idx_matches(w).reduce(_||_))
val way_match = widthMap(w => Mux1H(idx_matches(w), way_matches(w)))
val wb_tag_list = Wire(Vec(cfg.nMSHRs, UInt(tagBits.W)))
val meta_write_arb = Module(new Arbiter(new L1MetaWriteReq , cfg.nMSHRs))
val meta_read_arb = Module(new Arbiter(new L1MetaReadReq , cfg.nMSHRs))
val wb_req_arb = Module(new Arbiter(new WritebackReq(edge.bundle), cfg.nMSHRs))
val replay_arb = Module(new Arbiter(new BoomDCacheReqInternal , cfg.nMSHRs))
val resp_arb = Module(new Arbiter(new BoomDCacheResp , cfg.nMSHRs + nIOMSHRs))
val refill_arb = Module(new Arbiter(new L1DataWriteReq , cfg.nMSHRs))
val commit_vals = Wire(Vec(cfg.nMSHRs, Bool()))
val commit_addrs = Wire(Vec(cfg.nMSHRs, UInt(coreMaxAddrBits.W)))
val commit_cohs = Wire(Vec(cfg.nMSHRs, new ClientMetadata))
var sec_rdy = false.B
io.fence_rdy := true.B
io.probe_rdy := true.B
io.mem_grant.ready := false.B
val mshr_alloc_idx = Wire(UInt())
val pri_rdy = WireInit(false.B)
val pri_val = req.valid && sdq_rdy && cacheable && !idx_match(req_idx)
val mshrs = (0 until cfg.nMSHRs) map { i =>
val mshr = Module(new BoomMSHR)
mshr.io.id := i.U(log2Ceil(cfg.nMSHRs).W)
for (w <- 0 until memWidth) {
idx_matches(w)(i) := mshr.io.idx.valid && mshr.io.idx.bits === io.req(w).bits.addr(untagBits-1,blockOffBits)
tag_matches(w)(i) := mshr.io.tag.valid && mshr.io.tag.bits === io.req(w).bits.addr >> untagBits
way_matches(w)(i) := mshr.io.way.valid && mshr.io.way.bits === io.req(w).bits.way_en
}
wb_tag_list(i) := mshr.io.wb_req.bits.tag
mshr.io.req_pri_val := (i.U === mshr_alloc_idx) && pri_val
when (i.U === mshr_alloc_idx) {
pri_rdy := mshr.io.req_pri_rdy
}
mshr.io.req_sec_val := req.valid && sdq_rdy && tag_match(req_idx) && idx_matches(req_idx)(i) && cacheable
mshr.io.req := req.bits
mshr.io.req_is_probe := req_is_probe
mshr.io.req.sdq_id := sdq_alloc_id
// Clear because of a FENCE, a request to the same idx as a prefetched line,
// a probe to that prefetched line, all mshrs are in use
mshr.io.clear_prefetch := ((io.clear_all && !req.valid)||
(req.valid && idx_matches(req_idx)(i) && cacheable && !tag_match(req_idx)) ||
(req_is_probe && idx_matches(req_idx)(i)))
mshr.io.brupdate := io.brupdate
mshr.io.exception := io.exception
mshr.io.rob_pnr_idx := io.rob_pnr_idx
mshr.io.rob_head_idx := io.rob_head_idx
mshr.io.prober_state := io.prober_state
mshr.io.wb_resp := io.wb_resp
meta_write_arb.io.in(i) <> mshr.io.meta_write
meta_read_arb.io.in(i) <> mshr.io.meta_read
mshr.io.meta_resp := io.meta_resp
wb_req_arb.io.in(i) <> mshr.io.wb_req
replay_arb.io.in(i) <> mshr.io.replay
refill_arb.io.in(i) <> mshr.io.refill
lb_read_arb.io.in(i) <> mshr.io.lb_read
mshr.io.lb_resp := lb_read_data
lb_write_arb.io.in(i) <> mshr.io.lb_write
commit_vals(i) := mshr.io.commit_val
commit_addrs(i) := mshr.io.commit_addr
commit_cohs(i) := mshr.io.commit_coh
mshr.io.mem_grant.valid := false.B
mshr.io.mem_grant.bits := DontCare
when (io.mem_grant.bits.source === i.U) {
mshr.io.mem_grant <> io.mem_grant
}
sec_rdy = sec_rdy || (mshr.io.req_sec_rdy && mshr.io.req_sec_val)
resp_arb.io.in(i) <> mshr.io.resp
when (!mshr.io.req_pri_rdy) {
io.fence_rdy := false.B
}
for (w <- 0 until memWidth) {
when (!mshr.io.probe_rdy && idx_matches(w)(i) && io.req_is_probe(w)) {
io.probe_rdy := false.B
}
}
mshr
}
// Try to round-robin the MSHRs
val mshr_head = RegInit(0.U(log2Ceil(cfg.nMSHRs).W))
mshr_alloc_idx := RegNext(AgePriorityEncoder(mshrs.map(m=>m.io.req_pri_rdy), mshr_head))
when (pri_rdy && pri_val) { mshr_head := WrapInc(mshr_head, cfg.nMSHRs) }
io.meta_write <> meta_write_arb.io.out
io.meta_read <> meta_read_arb.io.out
io.wb_req <> wb_req_arb.io.out
val mmio_alloc_arb = Module(new Arbiter(Bool(), nIOMSHRs))
var mmio_rdy = false.B
val mmios = (0 until nIOMSHRs) map { i =>
val id = cfg.nMSHRs + 1 + i // +1 for wb unit
val mshr = Module(new BoomIOMSHR(id))
mmio_alloc_arb.io.in(i).valid := mshr.io.req.ready
mmio_alloc_arb.io.in(i).bits := DontCare
mshr.io.req.valid := mmio_alloc_arb.io.in(i).ready
mshr.io.req.bits := req.bits
mmio_rdy = mmio_rdy || mshr.io.req.ready
mshr.io.mem_ack.bits := io.mem_grant.bits
mshr.io.mem_ack.valid := io.mem_grant.valid && io.mem_grant.bits.source === id.U
when (io.mem_grant.bits.source === id.U) {
io.mem_grant.ready := true.B
}
resp_arb.io.in(cfg.nMSHRs + i) <> mshr.io.resp
when (!mshr.io.req.ready) {
io.fence_rdy := false.B
}
mshr
}
mmio_alloc_arb.io.out.ready := req.valid && !cacheable
TLArbiter.lowestFromSeq(edge, io.mem_acquire, mshrs.map(_.io.mem_acquire) ++ mmios.map(_.io.mem_access))
TLArbiter.lowestFromSeq(edge, io.mem_finish, mshrs.map(_.io.mem_finish))
val respq = Module(new BranchKillableQueue(new BoomDCacheResp, 4, u => u.uses_ldq, flow = false))
respq.io.brupdate := io.brupdate
respq.io.flush := io.exception
respq.io.enq <> resp_arb.io.out
io.resp <> respq.io.deq
for (w <- 0 until memWidth) {
io.req(w).ready := (w.U === req_idx) &&
Mux(!cacheable, mmio_rdy, sdq_rdy && Mux(idx_match(w), tag_match(w) && sec_rdy, pri_rdy))
io.secondary_miss(w) := idx_match(w) && way_match(w) && !tag_match(w)
io.block_hit(w) := idx_match(w) && tag_match(w)
}
io.refill <> refill_arb.io.out
val free_sdq = io.replay.fire && isWrite(io.replay.bits.uop.mem_cmd)
io.replay <> replay_arb.io.out
io.replay.bits.data := sdq(replay_arb.io.out.bits.sdq_id)
when (io.replay.valid || sdq_enq) {
sdq_val := sdq_val & ~(UIntToOH(replay_arb.io.out.bits.sdq_id) & Fill(cfg.nSDQ, free_sdq)) |
PriorityEncoderOH(~sdq_val(cfg.nSDQ-1,0)) & Fill(cfg.nSDQ, sdq_enq)
}
prefetcher.io.mshr_avail := RegNext(pri_rdy)
prefetcher.io.req_val := RegNext(commit_vals.reduce(_||_))
prefetcher.io.req_addr := RegNext(Mux1H(commit_vals, commit_addrs))
prefetcher.io.req_coh := RegNext(Mux1H(commit_vals, commit_cohs))
} | module sdq_17x64(
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [63:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [63:0] W0_data
);
reg [63:0] Memory[0:16];
always @(posedge W0_clk) begin
if (W0_en & 1'h1)
Memory[W0_addr] <= W0_data;
end
assign R0_data = R0_en ? Memory[R0_addr] : 64'bx;
endmodule |
Generate the Verilog code corresponding to this Chisel code /*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.util._
case class ListBufferParameters[T <: Data](gen: T, queues: Int, entries: Int, bypass: Boolean)
{
val queueBits = log2Up(queues)
val entryBits = log2Up(entries)
}
class ListBufferPush[T <: Data](params: ListBufferParameters[T]) extends Bundle
{
val index = UInt(params.queueBits.W)
val data = Output(params.gen)
}
// Multi-queue linked-list buffer: `params.queues` logical FIFOs share one pool
// of `params.entries` data slots. Per-queue head/tail pointers index the pool;
// `next` forms the per-entry linked list.
class ListBuffer[T <: Data](params: ListBufferParameters[T]) extends Module
{
override def desiredName = s"ListBuffer_${params.gen.typeName}_q${params.queues}_e${params.entries}"
val io = IO(new Bundle {
// push is visible on the same cycle; flow queues
val push = Flipped(Decoupled(new ListBufferPush(params)))
// per-queue non-empty flag
val valid = UInt(params.queues.W)
// pop the head of the selected queue; io.data shows that head's payload
val pop = Flipped(Valid(UInt(params.queueBits.W)))
val data = Output(params.gen)
})
val valid = RegInit(0.U(params.queues.W))
val head = Mem(params.queues, UInt(params.entryBits.W))
val tail = Mem(params.queues, UInt(params.entryBits.W))
// one bit per pool slot: currently allocated
val used = RegInit(0.U(params.entries.W))
val next = Mem(params.entries, UInt(params.entryBits.W))
val data = Mem(params.entries, params.gen)
// one-hot select of the lowest free slot
val freeOH = ~(leftOR(~used) << 1) & ~used
val freeIdx = OHToUInt(freeOH)
val valid_set = WireDefault(0.U(params.queues.W))
val valid_clr = WireDefault(0.U(params.queues.W))
val used_set = WireDefault(0.U(params.entries.W))
val used_clr = WireDefault(0.U(params.entries.W))
val push_tail = tail.read(io.push.bits.index)
val push_valid = valid(io.push.bits.index)
// can accept a push whenever any pool slot is free
io.push.ready := !used.andR
when (io.push.fire) {
valid_set := UIntToOH(io.push.bits.index, params.queues)
used_set := freeOH
data.write(freeIdx, io.push.bits.data)
// non-empty queue: link new slot after the old tail; empty queue: it is the head
when (push_valid) {
next.write(push_tail, freeIdx)
} .otherwise {
head.write(io.push.bits.index, freeIdx)
}
tail.write(io.push.bits.index, freeIdx)
}
val pop_head = head.read(io.pop.bits)
val pop_valid = valid(io.pop.bits)
// Bypass push data to the peek port
io.data := (if (!params.bypass) data.read(pop_head) else Mux(!pop_valid, io.push.bits.data, data.read(pop_head)))
io.valid := (if (!params.bypass) valid else (valid | valid_set))
// It is an error to pop something that is not valid
assert (!io.pop.fire || (io.valid)(io.pop.bits))
when (io.pop.fire) {
used_clr := UIntToOH(pop_head, params.entries)
// popping the last element empties the queue
when (pop_head === tail.read(io.pop.bits)) {
valid_clr := UIntToOH(io.pop.bits, params.queues)
}
// advance head; a same-cycle push to a single-entry queue supplies the new head
head.write(io.pop.bits, Mux(io.push.fire && push_valid && push_tail === pop_head, freeIdx, next.read(pop_head)))
}
// Empty bypass changes no state
when ((!params.bypass).B || !io.pop.valid || pop_valid) {
used := (used & ~used_clr) | used_set
valid := (valid & ~valid_clr) | valid_set
}
} | module ListBuffer_QueuedRequest_q15_e35(
input clock,
input reset,
output io_push_ready,
input io_push_valid,
input [3:0] io_push_bits_index,
input io_push_bits_data_control,
input [2:0] io_push_bits_data_opcode,
input [2:0] io_push_bits_data_param,
input [2:0] io_push_bits_data_size,
input [5:0] io_push_bits_data_source,
input [12:0] io_push_bits_data_tag,
input [5:0] io_push_bits_data_offset,
input [5:0] io_push_bits_data_put,
output [14:0] io_valid,
input io_pop_valid,
input [3:0] io_pop_bits,
output io_data_prio_0,
output io_data_prio_2,
output io_data_control,
output [2:0] io_data_opcode,
output [2:0] io_data_param,
output [2:0] io_data_size,
output [5:0] io_data_source,
output [12:0] io_data_tag,
output [5:0] io_data_offset,
output [5:0] io_data_put
);
wire [42:0] _data_ext_R0_data;
wire [5:0] _next_ext_R0_data;
wire [5:0] _tail_ext_R0_data;
wire [5:0] _tail_ext_R1_data;
wire [5:0] _head_ext_R0_data;
reg [14:0] valid;
reg [34:0] used;
wire [34:0] _freeOH_T_22 = ~used;
wire [33:0] _freeOH_T_3 = _freeOH_T_22[33:0] | {_freeOH_T_22[32:0], 1'h0};
wire [33:0] _freeOH_T_6 = _freeOH_T_3 | {_freeOH_T_3[31:0], 2'h0};
wire [33:0] _freeOH_T_9 = _freeOH_T_6 | {_freeOH_T_6[29:0], 4'h0};
wire [33:0] _freeOH_T_12 = _freeOH_T_9 | {_freeOH_T_9[25:0], 8'h0};
wire [33:0] _freeOH_T_15 = _freeOH_T_12 | {_freeOH_T_12[17:0], 16'h0};
wire [34:0] _GEN = {~(_freeOH_T_15 | {_freeOH_T_15[1:0], 32'h0}), 1'h1} & _freeOH_T_22;
wire [30:0] _freeIdx_T_1 = {29'h0, _GEN[34:33]} | _GEN[31:1];
wire [14:0] _freeIdx_T_3 = _freeIdx_T_1[30:16] | _freeIdx_T_1[14:0];
wire [6:0] _freeIdx_T_5 = _freeIdx_T_3[14:8] | _freeIdx_T_3[6:0];
wire [2:0] _freeIdx_T_7 = _freeIdx_T_5[6:4] | _freeIdx_T_5[2:0];
wire [5:0] freeIdx = {|(_GEN[34:32]), |(_freeIdx_T_1[30:15]), |(_freeIdx_T_3[14:7]), |(_freeIdx_T_5[6:3]), |(_freeIdx_T_7[2:1]), _freeIdx_T_7[2] | _freeIdx_T_7[0]};
wire [14:0] _push_valid_T = valid >> io_push_bits_index;
wire io_push_ready_0 = used != 35'h7FFFFFFFF;
wire data_MPORT_en = io_push_ready_0 & io_push_valid;
wire [15:0] _valid_clr_T = 16'h1 << io_pop_bits;
wire [15:0] _valid_set_T = 16'h1 << io_push_bits_index;
wire [63:0] _used_clr_T = 64'h1 << _head_ext_R0_data;
always @(posedge clock) begin
if (reset) begin
valid <= 15'h0;
used <= 35'h0;
end
else begin
valid <= valid & ~(io_pop_valid & _head_ext_R0_data == _tail_ext_R1_data ? _valid_clr_T[14:0] : 15'h0) | (data_MPORT_en ? _valid_set_T[14:0] : 15'h0);
used <= used & ~(io_pop_valid ? _used_clr_T[34:0] : 35'h0) | (data_MPORT_en ? _GEN : 35'h0);
end
end
head_15x6 head_ext (
.R0_addr (io_pop_bits),
.R0_en (1'h1),
.R0_clk (clock),
.R0_data (_head_ext_R0_data),
.W0_addr (io_pop_bits),
.W0_en (io_pop_valid),
.W0_clk (clock),
.W0_data (data_MPORT_en & _push_valid_T[0] & _tail_ext_R0_data == _head_ext_R0_data ? freeIdx : _next_ext_R0_data),
.W1_addr (io_push_bits_index),
.W1_en (data_MPORT_en & ~(_push_valid_T[0])),
.W1_clk (clock),
.W1_data (freeIdx)
);
tail_15x6 tail_ext (
.R0_addr (io_push_bits_index),
.R0_en (1'h1),
.R0_clk (clock),
.R0_data (_tail_ext_R0_data),
.R1_addr (io_pop_bits),
.R1_en (io_pop_valid),
.R1_clk (clock),
.R1_data (_tail_ext_R1_data),
.W0_addr (io_push_bits_index),
.W0_en (data_MPORT_en),
.W0_clk (clock),
.W0_data (freeIdx)
);
next_35x6 next_ext (
.R0_addr (_head_ext_R0_data),
.R0_en (io_pop_valid),
.R0_clk (clock),
.R0_data (_next_ext_R0_data),
.W0_addr (_tail_ext_R0_data),
.W0_en (data_MPORT_en & _push_valid_T[0]),
.W0_clk (clock),
.W0_data (freeIdx)
);
data_35x43 data_ext (
.R0_addr (_head_ext_R0_data),
.R0_en (1'h1),
.R0_clk (clock),
.R0_data (_data_ext_R0_data),
.W0_addr (freeIdx),
.W0_en (data_MPORT_en),
.W0_clk (clock),
.W0_data ({io_push_bits_data_put, io_push_bits_data_offset, io_push_bits_data_tag, io_push_bits_data_source, io_push_bits_data_size, io_push_bits_data_param, io_push_bits_data_opcode, io_push_bits_data_control, 2'h1})
);
assign io_push_ready = io_push_ready_0;
assign io_valid = valid;
assign io_data_prio_0 = _data_ext_R0_data[0];
assign io_data_prio_2 = _data_ext_R0_data[1];
assign io_data_control = _data_ext_R0_data[2];
assign io_data_opcode = _data_ext_R0_data[5:3];
assign io_data_param = _data_ext_R0_data[8:6];
assign io_data_size = _data_ext_R0_data[11:9];
assign io_data_source = _data_ext_R0_data[17:12];
assign io_data_tag = _data_ext_R0_data[30:18];
assign io_data_offset = _data_ext_R0_data[36:31];
assign io_data_put = _data_ext_R0_data[42:37];
endmodule |
Generate the Verilog code corresponding to this Chisel code /*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.util._
// Geometry of a ListBuffer: `queues` logical lists sharing `entries` data slots;
// `bypass` enables same-cycle push-to-pop forwarding.
case class ListBufferParameters[T <: Data](gen: T, queues: Int, entries: Int, bypass: Boolean)
{
// index widths derived from the geometry
val queueBits = log2Up(queues)
val entryBits = log2Up(entries)
}
// Payload of a push request: which queue to append to, plus the data.
class ListBufferPush[T <: Data](params: ListBufferParameters[T]) extends Bundle
{
val index = UInt(params.queueBits.W)
val data = Output(params.gen)
}
class ListBuffer[T <: Data](params: ListBufferParameters[T]) extends Module
{
override def desiredName = s"ListBuffer_${params.gen.typeName}_q${params.queues}_e${params.entries}"
val io = IO(new Bundle {
// push is visible on the same cycle; flow queues
val push = Flipped(Decoupled(new ListBufferPush(params)))
val valid = UInt(params.queues.W)
val pop = Flipped(Valid(UInt(params.queueBits.W)))
val data = Output(params.gen)
})
val valid = RegInit(0.U(params.queues.W))
val head = Mem(params.queues, UInt(params.entryBits.W))
val tail = Mem(params.queues, UInt(params.entryBits.W))
val used = RegInit(0.U(params.entries.W))
val next = Mem(params.entries, UInt(params.entryBits.W))
val data = Mem(params.entries, params.gen)
val freeOH = ~(leftOR(~used) << 1) & ~used
val freeIdx = OHToUInt(freeOH)
val valid_set = WireDefault(0.U(params.queues.W))
val valid_clr = WireDefault(0.U(params.queues.W))
val used_set = WireDefault(0.U(params.entries.W))
val used_clr = WireDefault(0.U(params.entries.W))
val push_tail = tail.read(io.push.bits.index)
val push_valid = valid(io.push.bits.index)
io.push.ready := !used.andR
when (io.push.fire) {
valid_set := UIntToOH(io.push.bits.index, params.queues)
used_set := freeOH
data.write(freeIdx, io.push.bits.data)
when (push_valid) {
next.write(push_tail, freeIdx)
} .otherwise {
head.write(io.push.bits.index, freeIdx)
}
tail.write(io.push.bits.index, freeIdx)
}
val pop_head = head.read(io.pop.bits)
val pop_valid = valid(io.pop.bits)
// Bypass push data to the peek port
io.data := (if (!params.bypass) data.read(pop_head) else Mux(!pop_valid, io.push.bits.data, data.read(pop_head)))
io.valid := (if (!params.bypass) valid else (valid | valid_set))
// It is an error to pop something that is not valid
assert (!io.pop.fire || (io.valid)(io.pop.bits))
when (io.pop.fire) {
used_clr := UIntToOH(pop_head, params.entries)
when (pop_head === tail.read(io.pop.bits)) {
valid_clr := UIntToOH(io.pop.bits, params.queues)
}
head.write(io.pop.bits, Mux(io.push.fire && push_valid && push_tail === pop_head, freeIdx, next.read(pop_head)))
}
// Empty bypass changes no state
when ((!params.bypass).B || !io.pop.valid || pop_valid) {
used := (used & ~used_clr) | used_set
valid := (valid & ~valid_clr) | valid_set
}
} | module head_2x4(
// 2-entry x 4-bit register array: one combinational read port (R0),
// two write ports (W0, W1) sharing W0_clk.
input R0_addr,
input R0_en,
input R0_clk,
output [3:0] R0_data,
input W0_addr,
input W0_en,
input W0_clk,
input [3:0] W0_data,
input W1_addr,
input W1_en,
input W1_clk,
input [3:0] W1_data
);
reg [3:0] Memory[0:1];
// Both writes occur in the same always block; if both ports target the
// same address in one cycle, the later (W1) assignment wins.
always @(posedge W0_clk) begin
if (W0_en & 1'h1)
Memory[W0_addr] <= W0_data;
if (W1_en & 1'h1)
Memory[W1_addr] <= W1_data;
end
// Combinational read; drives x when the read enable is low.
assign R0_data = R0_en ? Memory[R0_addr] : 4'bx;
endmodule
Generate the Verilog code corresponding to this Chisel code // See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{Decoupled, DecoupledIO}
// A Repeater passes its input to its output, unless repeat is asserted.
// When repeat is asserted, the Repeater copies the input and repeats it next cycle.
// Passes enq through to deq; when `repeat` is high at the time a beat is
// accepted, the beat is captured and replayed on deq until a deq fire
// occurs with `repeat` deasserted.
class Repeater[T <: Data](gen: T) extends Module
{
override def desiredName = s"Repeater_${gen.typeName}"
val io = IO( new Bundle {
// request that the current beat be saved and replayed
val repeat = Input(Bool())
// high while a saved beat is being replayed
val full = Output(Bool())
val enq = Flipped(Decoupled(gen.cloneType))
val deq = Decoupled(gen.cloneType)
} )
val full = RegInit(false.B)
val saved = Reg(gen.cloneType)
// When !full, a repeater is pass-through
io.deq.valid := io.enq.valid || full
io.enq.ready := io.deq.ready && !full
// replay the captured beat while full, otherwise forward the input
io.deq.bits := Mux(full, saved, io.enq.bits)
io.full := full
// capture on an accepted beat that must repeat; release on a sent beat that must not
when (io.enq.fire && io.repeat) { full := true.B; saved := io.enq.bits }
when (io.deq.fire && !io.repeat) { full := false.B }
}
object Repeater
{
  /** Wrap `enq` in a [[Repeater]] driven by `repeat`; returns the repeated stream. */
  def apply[T <: Data](enq: DecoupledIO[T], repeat: Bool): DecoupledIO[T] = {
    val rep = Module(new Repeater(chiselTypeOf(enq.bits)))
    rep.io.repeat := repeat
    rep.io.enq <> enq
    rep.io.deq
  }
} | module Repeater_TLBundleA_a17d64s7k1z3u(
input clock,
input reset,
input io_repeat,
output io_full,
output io_enq_ready,
input io_enq_valid,
input [2:0] io_enq_bits_opcode,
input [2:0] io_enq_bits_param,
input [2:0] io_enq_bits_size,
input [6:0] io_enq_bits_source,
input [16:0] io_enq_bits_address,
input [7:0] io_enq_bits_mask,
input io_enq_bits_corrupt,
input io_deq_ready,
output io_deq_valid,
output [2:0] io_deq_bits_opcode,
output [2:0] io_deq_bits_param,
output [2:0] io_deq_bits_size,
output [6:0] io_deq_bits_source,
output [16:0] io_deq_bits_address,
output [7:0] io_deq_bits_mask,
output io_deq_bits_corrupt
);
reg full;
reg [2:0] saved_opcode;
reg [2:0] saved_param;
reg [2:0] saved_size;
reg [6:0] saved_source;
reg [16:0] saved_address;
reg [7:0] saved_mask;
reg saved_corrupt;
wire io_deq_valid_0 = io_enq_valid | full;
wire io_enq_ready_0 = io_deq_ready & ~full;
wire _GEN = io_enq_ready_0 & io_enq_valid & io_repeat;
always @(posedge clock) begin
if (reset)
full <= 1'h0;
else
full <= ~(io_deq_ready & io_deq_valid_0 & ~io_repeat) & (_GEN | full);
if (_GEN) begin
saved_opcode <= io_enq_bits_opcode;
saved_param <= io_enq_bits_param;
saved_size <= io_enq_bits_size;
saved_source <= io_enq_bits_source;
saved_address <= io_enq_bits_address;
saved_mask <= io_enq_bits_mask;
saved_corrupt <= io_enq_bits_corrupt;
end
end
assign io_full = full;
assign io_enq_ready = io_enq_ready_0;
assign io_deq_valid = io_deq_valid_0;
assign io_deq_bits_opcode = full ? saved_opcode : io_enq_bits_opcode;
assign io_deq_bits_param = full ? saved_param : io_enq_bits_param;
assign io_deq_bits_size = full ? saved_size : io_enq_bits_size;
assign io_deq_bits_source = full ? saved_source : io_enq_bits_source;
assign io_deq_bits_address = full ? saved_address : io_enq_bits_address;
assign io_deq_bits_mask = full ? saved_mask : io_enq_bits_mask;
assign io_deq_bits_corrupt = full ? saved_corrupt : io_enq_bits_corrupt;
endmodule |
Generate the Verilog code corresponding to this Chisel code //******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v3.common.{MicroOp}
import boom.v3.exu.{BrUpdateInfo}
/**
* Object to XOR fold a input register of fullLength into a compressedLength.
*/
/**
 * XOR-fold a `fullLength`-bit input down to `compressedLength` bits by
 * slicing it into chunks and XOR-ing the chunks together.
 */
object Fold
{
  def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
    if (fullLength <= compressedLength) {
      input
    } else {
      // Walk the chunk start offsets, threading (accumulator, unconsumed bits).
      val starts = 0 to (fullLength - 1) by compressedLength
      val (folded, _) = starts.foldLeft((0.U(compressedLength.W), input.asUInt)) {
        case ((acc, rest), start) =>
          // Final chunk may be narrower than compressedLength.
          val chunk = if (start + compressedLength > fullLength) fullLength - start else compressedLength
          require(chunk > 0)
          (acc(compressedLength-1, 0) ^ rest(chunk-1, 0), rest >> chunk.U)
      }
      folded
    }
  }
}
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
/**
 * Was this in-flight op killed by a branch mispredict?
 * Uses the "fast" mispredict mask from b1.
 */
object IsKilledByBranch
{
  def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool =
    apply(brupdate, uop.br_mask)

  def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool =
    maskMatch(brupdate.b1.mispredict_mask, uop_mask)
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
/**
 * Copy a MicroOp, clearing the bits of now-resolved branches from its br_mask.
 */
object GetNewUopAndBrMask
{
  def apply(uop: MicroOp, brupdate: BrUpdateInfo)
    (implicit p: Parameters): MicroOp = {
    val updated = WireInit(uop)
    updated.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
    updated
  }
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
/**
 * Compute an updated branch mask with all now-resolved branches cleared.
 */
object GetNewBrMask
{
  def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt =
    uop.br_mask & ~brupdate.b1.resolve_mask

  def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt =
    br_mask & ~brupdate.b1.resolve_mask
}
/**
 * Copy a uop-carrying value with its branch mask pruned of resolved branches.
 */
object UpdateBrMask
{
  /** MicroOp variant. */
  def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
    val masked = WireInit(uop)
    masked.br_mask := GetNewBrMask(brupdate, uop)
    masked
  }
  /** Any bundle carrying a uop. */
  def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
    val masked = WireInit(bundle)
    masked.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
    masked
  }
  /** Valid-wrapped variant: additionally kills `valid` on a mispredict. */
  def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = {
    val masked = WireInit(bundle)
    masked.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
    masked.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask)
    masked
  }
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
// True when the two masks share at least one set bit.
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
// Clear bit `idx` of `msk`, truncating back to msk's original width.
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
// NOTE(review): connects `reg_val` to a left-shifted copy of itself with
// `new_bit` entering at bit 0, then returns it — the caller is expected
// to pass a Reg here.
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
// top bit of the compressed register wraps around into the new bottom bit
val carry = csr(clen-1)
// XOR the evicted bit back out at its folded position (hlen % clen)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
/**
 * Add `amt` to `value` modulo `n` ("n" is the number of increments,
 * so the counter wraps at n-1).
 */
object WrapAdd
{
  def apply(value: UInt, amt: UInt, n: Int): UInt = {
    if (isPow2(n)) {
      // Power-of-two range: plain add, then truncate.
      (value + amt)(log2Ceil(n)-1, 0)
    } else {
      // Widen by one bit so the comparison against n cannot overflow.
      val wide = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
      Mux(wide >= n.U, wide - n.U, wide)
    }
  }
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
/**
 * Subtract a constant `amt` from `value`, wrapping modulo `n`
 * ("n" is the number of increments, so we wrap to n-1).
 *
 * Fix: removed two dead locals (`v`, `b`) that were computed but never used.
 */
object WrapSub
{
  def apply(value: UInt, amt: Int, n: Int): UInt = {
    if (isPow2(n)) {
      // Power-of-two range: subtract and truncate.
      (value - amt.U)(log2Ceil(n)-1, 0)
    } else {
      // Wrap around the bottom when value < amt.
      Mux(value >= amt.U,
        value - amt.U,
        n.U - amt.U + value)
    }
  }
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
/**
 * Increment `value` modulo `n` (wraps from n-1 back to 0).
 */
object WrapInc
{
  def apply(value: UInt, n: Int): UInt = {
    if (isPow2(n)) {
      (value + 1.U)(log2Ceil(n)-1, 0)
    } else {
      val atMax = value === (n-1).U
      Mux(atMax, 0.U, value + 1.U)
    }
  }
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
/**
 * Decrement `value` modulo `n` (wraps from 0 back to n-1).
 */
object WrapDec
{
  def apply(value: UInt, n: Int): UInt = {
    if (isPow2(n)) {
      (value - 1.U)(log2Ceil(n)-1, 0)
    } else {
      val atZero = value === 0.U
      Mux(atZero, (n-1).U, value - 1.U)
    }
  }
}
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
// Clear the low log2(b) bits of pc while preserving all upper bits.
def apply(pc: UInt, b: Int): UInt = {
// Invert for scenario where pc longer than b
// (which would clear all bits above size(b)).
~(~pc | (b-1).U)
}
}
/**
* Object to rotate a signal left by one
*/
/**
 * Rotate a signal left by one bit (the MSB wraps around to bit 0).
 */
object RotateL1
{
  def apply(signal: UInt): UInt = {
    val w = signal.getWidth
    val msb = signal(w-1)
    Cat(signal(w-2, 0), msb)
  }
}
/**
* Object to sext a value to a particular length.
*/
/**
 * Sign-extend `x` to `length` bits; a no-op when x is already that width.
 */
object Sext
{
  def apply(x: UInt, length: Int): UInt =
    if (x.getWidth == length) x
    else Cat(Fill(length - x.getWidth, x(x.getWidth-1)), x)
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U}
// Expand BOOM's packed immediate `ip` into a 32b signed immediate; `isel`
// selects the RISC-V immediate format (I/S/B/U/J). U-type comes out
// pre-shifted up 12 bits.
def apply(ip: UInt, isel: UInt): SInt = {
// top packed bit is the sign, replicated into unused upper fields
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
// bit 0 only exists for I- and S-type immediates
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt
}
}
/**
* Object to get the FP rounding mode out of a packed immediate.
*/
object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } }
/**
* Object to get the FP function fype from a packed immediate.
* Note: only works if !(IS_B or IS_S)
*/
object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } }
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
// True when `inst` carries the JALR opcode (7'b1100111).
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
* for a debug assert (no where else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
// Decode the branch (B-type) or jump (J-type) offset straight from the
// instruction bits; selected by the branch opcode.
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
// sign-extended B-type and J-type immediates, LSB forced to 0
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
// Lowest set position at or after `head`, wrapping around to the start
// if none exists past head.
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
// first half: candidates at/after head; appended copy catches wrap-around hits
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
// Circular-order test: i0 precedes i1 in the queue order starting at `head`.
// The triple XOR handles the wrap-around cases.
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
/**
* Set all bits at or below the highest order '1'.
*/
/**
 * Set all bits at or below the highest order '1'.
 */
object MaskLower
{
  def apply(in: UInt) = {
    // OR together every right-shift of the input.
    val shifted = for (i <- 0 until in.getWidth) yield in >> i.U
    shifted.reduce(_ | _)
  }
}
/**
* Set all bits at or above the lowest order '1'.
*/
/**
 * Set all bits at or above the lowest order '1'.
 */
object MaskUpper
{
  def apply(in: UInt) = {
    val w = in.getWidth
    // OR together every width-truncated left-shift of the input.
    val shifted = for (i <- 0 until w) yield (in << i.U)(w-1, 0)
    shifted.reduce(_ | _)
  }
}
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
// assumes `in` is rectangular (every row has in(0).size elements)
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
// One-hot selects for the n lowest set bits of `in`. sels(i) is one-hot
// (or zero once `in` is exhausted).
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
// peel off the lowest remaining set bit each iteration
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
// Route the first k valid inputs (of n) onto the k outputs, in order.
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
// degenerate case: direct wiring
io.out <> io.in
} else {
// counts(i): one-hot output position input i would land on, formed by
// shifting the marker up once per preceding valid input
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
// sels(j)(i): input i routes to output j this cycle
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
// an input is ready when the output slot it maps to is ready
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
// Circular-buffer queue whose entries can be killed in place by a branch
// mispredict or by `flush` (filtered through flush_fn). Killed entries are
// skipped (dequeued invalid) rather than compacted.
class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v3.common.BoomModule()(p)
with boom.v3.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire)
// an invalid (killed) head entry is drained even if deq isn't ready
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
// Every cycle: kill entries hit by a mispredict or flush, and prune
// resolved branches from the surviving entries' masks.
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop)
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
// head entry with its (separately stored, mask-updated) uop spliced in
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop))
io.deq.bits := out
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, out.uop)
// For flow queue behavior.
if (flow) {
// when empty, bypass enq straight to deq without occupying a slot
when (io.empty) {
io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop)
io.deq.bits := io.enq.bits
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
do_deq := false.B
when (io.deq.ready) { do_enq := false.B }
}
}
private val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
  /**
   * Render a Chisel Bool as a single ASCII character.
   *
   * @param c_bool Chisel Bool to render
   * @param trueChar Scala Char emitted when the bool is true
   * @param falseChar Scala Char emitted when the bool is false
   * @return UInt ASCII Char for "trueChar" or "falseChar"
   */
  def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
    val onTrue  = Str(trueChar)
    val onFalse = Str(falseChar)
    Mux(c_bool, onTrue, onFalse)
  }
}
object CfiTypeToChars
{
  /**
   * Printable label for a CFI type.
   *
   * @param cfi_type specific cfi type
   * @return Vec of Strs (must be indexed to get specific char)
   */
  def apply(cfi_type: UInt) = {
    val labels = Seq("----", "BR ", "JAL ", "JALR")
    val table = VecInit(labels.map(label => VecInit(label.map(ch => Str(ch)))))
    table(cfi_type)
  }
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
  /**
   * Add prefix to BOOM strings (currently only adds the hartId)
   *
   * @param strs list of strings
   * @return String combining the list with the prefix per line
   */
  def apply(strs: String*)(implicit p: Parameters) = {
    val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
    strs.map(prefix + _ + "\n").mkString("")
  }
} | module ram_5x77(
// 5-entry x 77-bit RAM: one combinational read port, one write port.
input [2:0] R0_addr,
input R0_en,
input R0_clk,
output [76:0] R0_data,
input [2:0] W0_addr,
input W0_en,
input W0_clk,
input [76:0] W0_data
);
reg [76:0] Memory[0:4];
always @(posedge W0_clk) begin
if (W0_en & 1'h1)
Memory[W0_addr] <= W0_data;
end
// Combinational read; drives x when the read enable is low.
assign R0_data = R0_en ? Memory[R0_addr] : 77'bx;
endmodule
Generate the Verilog code corresponding to this Chisel code package testchipip.serdes
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
trait TLFieldHelper {
// Per-beat ("body") fields: may differ on every beat of a message.
def getBodyFields(b: TLChannel): Seq[Data] = b match {
case b: TLBundleA => Seq(b.mask, b.data, b.corrupt)
case b: TLBundleB => Seq(b.mask, b.data, b.corrupt)
case b: TLBundleC => Seq( b.data, b.corrupt)
case b: TLBundleD => Seq( b.data, b.corrupt)
case b: TLBundleE => Seq()
}
// Per-message ("const") fields: fixed across all beats of a message.
def getConstFields(b: TLChannel): Seq[Data] = b match {
case b: TLBundleA => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo )
case b: TLBundleB => Seq(b.opcode, b.param, b.size, b.source, b.address )
case b: TLBundleC => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo )
case b: TLBundleD => Seq(b.opcode, b.param, b.size, b.source, b.user, b.echo, b.sink, b.denied)
case b: TLBundleE => Seq( b.sink )
}
// Beat payload must hold the wider of the body and const field groups.
def minTLPayloadWidth(b: TLChannel): Int = Seq(getBodyFields(b), getConstFields(b)).map(_.map(_.getWidth).sum).max
def minTLPayloadWidth(bs: Seq[TLChannel]): Int = bs.map(b => minTLPayloadWidth(b)).max
def minTLPayloadWidth(b: TLBundle): Int = minTLPayloadWidth(Seq(b.a, b.b, b.c, b.d, b.e).map(_.bits))
}
// One serialized beat; head/tail flags delimit a multi-beat TL message.
class TLBeat(val beatWidth: Int) extends Bundle {
val payload = UInt(beatWidth.W)
val head = Bool()
val tail = Bool()
}
// Serializes one TileLink channel into TLBeats: first a beat carrying the
// per-message ("const") fields, then (if needed) beats carrying the
// per-beat ("body") fields. Subclasses drive `has_body`.
abstract class TLChannelToBeat[T <: TLChannel](gen: => T, edge: TLEdge, nameSuffix: Option[String])(implicit val p: Parameters) extends Module with TLFieldHelper {
override def desiredName = (Seq(this.getClass.getSimpleName) ++ nameSuffix ++ Seq(gen.params.shortName)).mkString("_")
val beatWidth = minTLPayloadWidth(gen)
val io = IO(new Bundle {
val protocol = Flipped(Decoupled(gen))
val beat = Decoupled(new TLBeat(beatWidth))
})
def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
// convert decoupled to irrevocable
val q = Module(new Queue(gen, 1, pipe=true, flow=true))
q.io.enq <> io.protocol
val protocol = q.io.deq
val has_body = Wire(Bool())
val body_fields = getBodyFields(protocol.bits)
val const_fields = getConstFields(protocol.bits)
val head = edge.first(protocol.bits, protocol.fire)
val tail = edge.last(protocol.bits, protocol.fire)
val body = Cat( body_fields.filter(_.getWidth > 0).map(_.asUInt))
val const = Cat(const_fields.filter(_.getWidth > 0).map(_.asUInt))
// false while sending the const beat, true while sending body beats
val is_body = RegInit(false.B)
io.beat.valid := protocol.valid
// only consume the protocol beat once its last serialized beat is sent
protocol.ready := io.beat.ready && (is_body || !has_body)
io.beat.bits.head := head && !is_body
io.beat.bits.tail := tail && (is_body || !has_body)
io.beat.bits.payload := Mux(is_body, body, const)
when (io.beat.fire && io.beat.bits.head) { is_body := true.B }
when (io.beat.fire && io.beat.bits.tail) { is_body := false.B }
}
// Deserializes TLBeats back into a TileLink channel: the head beat's payload
// holds the per-message ("const") fields (latched into const_reg), later
// beats hold the per-beat ("body") fields.
abstract class TLChannelFromBeat[T <: TLChannel](gen: => T, nameSuffix: Option[String])(implicit val p: Parameters) extends Module with TLFieldHelper {
override def desiredName = (Seq(this.getClass.getSimpleName) ++ nameSuffix ++ Seq(gen.params.shortName)).mkString("_")
val beatWidth = minTLPayloadWidth(gen)
val io = IO(new Bundle {
val protocol = Decoupled(gen)
val beat = Flipped(Decoupled(new TLBeat(beatWidth)))
})
// Handle size = 1 gracefully (Chisel3 empty range is broken)
def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)
val protocol = Wire(Decoupled(gen))
io.protocol <> protocol
val body_fields = getBodyFields(protocol.bits)
val const_fields = getConstFields(protocol.bits)
// true until a head beat has been latched for the current message
val is_const = RegInit(true.B)
val const_reg = Reg(UInt(const_fields.map(_.getWidth).sum.W))
// on the head beat the const fields come straight off the wire
val const = Mux(io.beat.bits.head, io.beat.bits.payload, const_reg)
// a const-only beat (head that is not also tail) is absorbed without output
io.beat.ready := (is_const && !io.beat.bits.tail) || protocol.ready
protocol.valid := (!is_const || io.beat.bits.tail) && io.beat.valid
// Unpack a flat UInt into a list of fields, last field in the low bits.
def assign(i: UInt, sigs: Seq[Data]) = {
var t = i
for (s <- sigs.reverse) {
s := t.asTypeOf(s.cloneType)
t = t >> s.getWidth
}
}
assign(const, const_fields)
assign(io.beat.bits.payload, body_fields)
when (io.beat.fire && io.beat.bits.head) { is_const := false.B; const_reg := io.beat.bits.payload }
when (io.beat.fire && io.beat.bits.tail) { is_const := true.B }
}
class TLAToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleA(bundle), edgeIn, nameSuffix)(p) {
has_body := edgeIn.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U)
}
class TLAFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleA(bundle), nameSuffix)(p) {
when (io.beat.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) }
}
class TLBToBeat(edgeOut: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleB(bundle), edgeOut, nameSuffix)(p) {
has_body := edgeOut.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U)
}
class TLBFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleB(bundle), nameSuffix)(p) {
when (io.beat.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) }
}
class TLCToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleC(bundle), edgeIn, nameSuffix)(p) {
has_body := edgeIn.hasData(protocol.bits)
}
class TLCFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleC(bundle), nameSuffix)(p)
class TLDToBeat(edgeOut: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleD(bundle), edgeOut, nameSuffix)(p) {
has_body := edgeOut.hasData(protocol.bits)
}
class TLDFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleD(bundle), nameSuffix)(p)
class TLEToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleE(bundle), edgeIn, nameSuffix)(p) {
has_body := edgeIn.hasData(protocol.bits)
}
class TLEFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleE(bundle), nameSuffix)(p) | module TLDFromBeat_SerialRAM_a64d64s8k8z8c(
input clock,
input reset,
input io_protocol_ready,
output io_protocol_valid,
output [2:0] io_protocol_bits_opcode,
output [1:0] io_protocol_bits_param,
output [7:0] io_protocol_bits_size,
output [7:0] io_protocol_bits_source,
output [7:0] io_protocol_bits_sink,
output io_protocol_bits_denied,
output [63:0] io_protocol_bits_data,
output io_protocol_bits_corrupt,
output io_beat_ready,
input io_beat_valid,
input [64:0] io_beat_bits_payload,
input io_beat_bits_head,
input io_beat_bits_tail
);
reg is_const;
reg [29:0] const_reg;
wire [29:0] const_0 = io_beat_bits_head ? io_beat_bits_payload[29:0] : const_reg;
wire io_beat_ready_0 = is_const & ~io_beat_bits_tail | io_protocol_ready;
wire _GEN = io_beat_ready_0 & io_beat_valid;
wire _GEN_0 = _GEN & io_beat_bits_head;
always @(posedge clock) begin
if (reset)
is_const <= 1'h1;
else
is_const <= _GEN & io_beat_bits_tail | ~_GEN_0 & is_const;
if (_GEN_0)
const_reg <= io_beat_bits_payload[29:0];
end
assign io_protocol_valid = (~is_const | io_beat_bits_tail) & io_beat_valid;
assign io_protocol_bits_opcode = const_0[29:27];
assign io_protocol_bits_param = const_0[26:25];
assign io_protocol_bits_size = const_0[24:17];
assign io_protocol_bits_source = const_0[16:9];
assign io_protocol_bits_sink = const_0[8:1];
assign io_protocol_bits_denied = const_0[0];
assign io_protocol_bits_data = io_beat_bits_payload[64:1];
assign io_protocol_bits_corrupt = io_beat_bits_payload[0];
assign io_beat_ready = io_beat_ready_0;
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
// Handle cases where writeack arrives before write is done
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
 *
 * Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
 * reorder any responses, or store any linked lists: with a single entry there is at most one outstanding
 * reservation, so responses can be passed straight through.
 *
 * @param gen    payload type carried by the buffer
 * @param params buffer configuration; 'numEntries' must be exactly 1
 */
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
    extends BaseReservableListBuffer(gen, params) {
  require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
  // One-bit occupancy flag (numEntries == 1), updated via set/clear pulses below.
  val used = RegInit(0.U(params.numEntries.W))
  // Single-entry memory holding the list index of the current reservation.
  val map = Mem(params.numEntries, UInt(params.listBits.W))
  val usedSet = WireDefault(0.U(params.numEntries.W))
  val usedClr = WireDefault(0.U(params.numEntries.W))
  // Clear-then-set update; a set and clear in the same cycle results in the entry being set.
  used := (used & ~usedClr) | usedSet
  // A new reservation can only be accepted while the single entry is free.
  ioReserve.ready := used === 0.U
  // Store which list index was reserved, we need to return this value when we get a response.
  when(ioReserve.fire) {
    usedSet := 1.U
    map.write(0.U, ioReserve.bits)
  }
  // There's only one valid linked list entry, which is at index 0.
  ioReservedIndex := 0.U
  val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
  // Mark the linked list as empty when we get the last beat in a response.
  // Note that 'ioResponse.fire === ioDataOut.fire'.
  when(ioResponse.fire && isLastResponseBeat) {
    usedClr := 1.U
  }
  // Always pass the response data straight through, since we never need to reorder the response data.
  ioDataOut.bits.listIndex := map.read(0.U)
  ioDataOut.bits.payload := ioResponse.bits.data
  ioDataOut.valid := ioResponse.valid
  ioResponse.ready := ioDataOut.ready
}
| module dataMems_480( // @[UnsafeAXI4ToTL.scala:365:62]
  // Generated wrapper around the shared SRAM macro 'dataMems_0_ext':
  // one synchronous read port and one synchronous write port,
  // 32 entries (5-bit address) of 67-bit data.
  input [4:0] R0_addr,
  input R0_en,
  input R0_clk,
  output [66:0] R0_data,
  input [4:0] W0_addr,
  input W0_en,
  input W0_clk,
  input [66:0] W0_data
);
  // Straight pass-through instantiation; all ports are forwarded unchanged.
  dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
    .R0_addr (R0_addr),
    .R0_en (R0_en),
    .R0_clk (R0_clk),
    .R0_data (R0_data),
    .W0_addr (W0_addr),
    .W0_en (W0_en),
    .W0_clk (W0_clk),
    .W0_data (W0_data)
  ); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
  /** Build an n-deep shift register with reset value 'init' on every stage.
   *
   * Stages are built with foldRight, so stage n-1 samples 'in' and stage 0
   * drives the returned value. When 'name' is given, each register is
   * suggest-named "<name>_<i>" so the stages are identifiable in the netlist.
   */
  def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
    (0 until n).foldRight(in) {
      case (i, next) => {
        val r = RegNext(next, init)
        name.foreach { na => r.suggestName(s"${na}_${i}") }
        r
      }
    }
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg,
* but only used for timing applications
*/
/** Common shell for the pipeline-register wrappers below: a w-bit data-in /
 * data-out module. Concrete subclasses supply the register chain between
 * io.d and io.q.
 *
 * @param w width in bits of the data path
 */
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(w.W))
    val q = Output(UInt(w.W))
  }
  )
}
object AbstractPipelineReg {
  /** Instantiate a pipeline-register module and route an arbitrary Data
   * through it: 'in' is flattened to UInt for the module's io.d, and io.q is
   * reinterpreted back as the original type. 'name' optionally suggest-names
   * the instantiated module.
   */
  def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
    val chain = Module(gen)
    name.foreach{ chain.suggestName(_) }
    chain.io.d := in.asUInt
    chain.io.q.asTypeOf(in)
  }
}
/** Asynchronously-reset shift register built from 'depth' instances of
 * [[AsyncResetRegVec]], each 'w' bits wide and reset to 'init'.
 *
 * chain.last is the input stage (samples io.d) and chain.head is the output
 * stage (drives io.q); every stage is always enabled. Each stage is
 * suggest-named "<name>_<i>" for recognizability in backend flows.
 */
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
  require(depth > 0, "Depth must be greater than 0.")
  override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
  val chain = List.tabulate(depth) { i =>
    Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
  }
  // Input enters at the tail of the chain...
  chain.last.io.d := io.d
  chain.last.io.en := true.B
  // ...and each earlier stage samples the stage behind it.
  (chain.init zip chain.tail).foreach { case (sink, source) =>
    sink.io.d := source.io.q
    sink.io.en := true.B
  }
  io.q := chain.head.io.q
}
object AsyncResetShiftReg {
  /** Pass 'in' through a depth-stage asynchronously-reset shift register.
   * Overloads accept the reset value either as an Int or as a literal of the
   * data type (converted via litValue).
   */
  def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
    AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
  def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
    apply(in, depth, 0, name)
  def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
    apply(in, depth, init.litValue.toInt, name)
  def apply [T <: Data](in: T, depth: Int, init: T): T =
    apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
/** Configuration for an [[AsyncQueue]] clock-domain crossing.
 *
 * @param depth  number of queue entries; must be a power of two
 * @param sync   number of synchronizer stages on each crossing signal (>= 2)
 * @param safe   add reset-resynchronization logic (see below)
 * @param narrow move the read mux to the source side (see below)
 */
case class AsyncQueueParams(
  depth: Int = 8,
  sync: Int = 3,
  safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
  narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
  require (depth > 0 && isPow2(depth))
  require (sync >= 2)
  // Index width; pointers carry one extra wrap bit (bits+1 total).
  val bits = log2Ceil(depth)
  // Number of data lanes crossing the boundary: 1 when narrow, else all entries.
  val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
  // When there is only one entry, we don't need narrow.
  def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
/** Handshake signals used by a 'safe' AsyncQueue to stabilize the crossing
 * across resets. Directions are from the source's point of view: the source
 * reports its write-index validity and reset state, and observes the sink's.
 */
class AsyncBundleSafety extends Bundle {
  val ridx_valid = Input (Bool())
  val widx_valid = Output(Bool())
  val source_reset_n = Output(Bool())
  val sink_reset_n = Input (Bool())
}
/** The raw signal bundle that crosses the clock-domain boundary of an
 * AsyncQueue: the data lanes plus Gray-coded read/write indices.
 * 'index' exists only in narrow mode (sink tells source which entry to mux);
 * 'safe' exists only when reset-safety logic is enabled.
 */
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
  // Data-path synchronization
  val mem = Output(Vec(params.wires, gen))
  val ridx = Input (UInt((params.bits+1).W))
  val widx = Output(UInt((params.bits+1).W))
  val index = params.narrow.option(Input(UInt(params.bits.W)))

  // Signals used to self-stabilize a safe AsyncQueue
  val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
  /** A Gray-coded counter: keeps a binary register (suggest-named 'name'),
   * increments it when 'increment' is high (or zeroes it on 'clear'), and
   * returns the Gray encoding (bin ^ (bin >> 1)) of the *next* value, so at
   * most one output bit changes per cycle — safe to synchronize across
   * clock domains.
   */
  def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
    val incremented = Wire(UInt(bits.W))
    val binary = RegNext(next=incremented, init=0.U).suggestName(name)
    incremented := Mux(clear, 0.U, binary + increment.asUInt)
    incremented ^ (incremented >> 1)
  }
}
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
/** Enqueue (source-clock-domain) half of an AsyncQueue.
 *
 * Writes io.enq.bits into an unreset entry memory at the Gray-coded write
 * index and publishes the index across the crossing; 'ready' is computed by
 * comparing the local write pointer against the synchronized read pointer.
 */
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
  override def desiredName = s"AsyncQueueSource_${gen.typeName}"

  val io = IO(new Bundle {
    // These come from the source domain
    val enq = Flipped(Decoupled(gen))
    // These cross to the sink clock domain
    val async = new AsyncBundle(gen, params)
  })

  val bits = params.bits
  val sink_ready = WireInit(true.B)
  val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
  // Gray-coded write pointer; async reset so it is valid even during reset assertion.
  val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
  // Read pointer synchronized in from the sink domain.
  val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
  // Full when widx equals ridx with the top two Gray bits inverted.
  val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)

  // Convert the registered Gray write pointer back to a binary memory index.
  val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
  when (io.enq.fire) { mem(index) := io.enq.bits }

  // Register 'ready' so io.enq.ready has no combinational dependence on the crossing.
  val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
  io.enq.ready := ready_reg && sink_ready

  val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
  io.async.widx := widx_reg

  // Narrow mode: mux the selected entry here; otherwise expose the whole memory.
  io.async.index match {
    case Some(index) => io.async.mem(0) := mem(index)
    case None => io.async.mem := mem
  }

  io.async.safe.foreach { sio =>
    // Four-stage valid handshake that holds the crossing off until both sides
    // are out of reset; the source-side synchronizers are reset whenever
    // either this side or the sink is in reset.
    val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
    val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))

    val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
    val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
    source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
    source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
    sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
    sink_valid .reset := reset.asAsyncReset

    source_valid_0.clock := clock
    source_valid_1.clock := clock
    sink_extend .clock := clock
    sink_valid .clock := clock

    source_valid_0.io.in := true.B
    source_valid_1.io.in := source_valid_0.io.out
    sio.widx_valid := source_valid_1.io.out
    sink_extend.io.in := sio.ridx_valid
    sink_valid.io.in := sink_extend.io.out
    sink_ready := sink_valid.io.out
    sio.source_reset_n := !reset.asBool

    // Assert that if there is stuff in the queue, then reset cannot happen
    //  Impossible to write because dequeue can occur on the receiving side,
    //  then reset allowed to happen, but write side cannot know that dequeue
    //  occurred.
    // TODO: write some sort of sanity check assertion for users
    // that denote don't reset when there is activity
    //    assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
    //    assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
  }
}
/** Dequeue (sink-clock-domain) half of an AsyncQueue.
 *
 * Synchronizes the source's Gray-coded write pointer, compares it with the
 * local read pointer to produce 'valid', and latches the selected entry into
 * a clock-crossing output register.
 */
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
  override def desiredName = s"AsyncQueueSink_${gen.typeName}"

  val io = IO(new Bundle {
    // These come from the sink domain
    val deq = Decoupled(gen)
    // These cross to the source clock domain
    val async = Flipped(new AsyncBundle(gen, params))
  })

  val bits = params.bits
  val source_ready = WireInit(true.B)
  // Gray-coded read pointer; async reset so it is valid even during reset assertion.
  val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
  // Write pointer synchronized in from the source domain.
  val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
  // Non-empty when the (Gray) pointers differ.
  val valid = source_ready && ridx =/= widx

  // The mux is safe because timing analysis ensures ridx has reached the register
  // On an ASIC, changes to the unread location cannot affect the selected value
  // On an FPGA, only one input changes at a time => mem updates don't cause glitches
  // The register only latches when the selected valued is not being written
  val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
  io.async.index.foreach { _ := index }
  // This register does not NEED to be reset, as its contents will not
  // be considered unless the asynchronously reset deq valid register is set.
  // It is possible that bits latches when the source domain is reset / has power cut
  // This is safe, because isolation gates brought mem low before the zeroed widx reached us
  val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
  io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))

  // Register 'valid' so io.deq.valid has no combinational dependence on the crossing.
  val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
  io.deq.valid := valid_reg && source_ready

  val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
  io.async.ridx := ridx_reg

  io.async.safe.foreach { sio =>
    // Mirror image of the source-side handshake: the sink-side synchronizers
    // are reset whenever either this side or the source is in reset.
    val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
    val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))

    val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
    val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
    sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
    sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
    source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
    source_valid .reset := reset.asAsyncReset

    sink_valid_0 .clock := clock
    sink_valid_1 .clock := clock
    source_extend.clock := clock
    source_valid .clock := clock

    sink_valid_0.io.in := true.B
    sink_valid_1.io.in := sink_valid_0.io.out
    sio.ridx_valid := sink_valid_1.io.out
    source_extend.io.in := sio.widx_valid
    source_valid.io.in := source_extend.io.out
    source_ready := source_valid.io.out
    sio.sink_reset_n := !reset.asBool

    // TODO: write some sort of sanity check assertion for users
    // that denote don't reset when there is activity
    //
    // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
    // val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
    // val reset_rise = !reset_and_extend_prev && reset_and_extend
    // val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
    // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
  }
}
/** Terminate an [[AsyncBundle]] in the local (sink) clock domain, producing
 * an ordinary DecoupledIO. The sink's synchronizer depth may differ from the
 * source's.
 */
object FromAsyncBundle
{
  // Sometimes it makes sense for the sink to have different sync than the source
  def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
  def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
    val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
    sink.io.async <> x
    sink.io.deq
  }
}
/** Launch a ReadyValidIO into an [[AsyncBundle]] from the local (source)
 * clock domain by instantiating an [[AsyncQueueSource]].
 */
object ToAsyncBundle
{
  def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
    val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
    source.io.enq <> x
    source.io.async
  }
}
/** Complete asynchronous FIFO crossing: instantiates the source half in the
 * enqueue clock/reset domain and the sink half in the dequeue domain, then
 * ties their async bundles together.
 */
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
  val io = IO(new CrossingIO(gen))
  val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
  val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }

  source.io.enq <> io.enq
  io.deq <> sink.io.deq

  sink.io.async <> source.io.async
}
| module AsyncValidSync_68( // @[AsyncQueue.scala:58:7]
  // Generated from AsyncValidSync: io_in is tied high, so io_out is the
  // constant 1 passed through a 3-deep async-reset synchronizer (deasserts
  // on reset, re-asserts 3 cycles after reset release).
  output io_out, // @[AsyncQueue.scala:59:14]
  input clock, // @[AsyncQueue.scala:63:17]
  input reset // @[AsyncQueue.scala:64:17]
);
  wire io_in = 1'h1; // @[ShiftReg.scala:45:23]
  wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
  wire io_out_0; // @[AsyncQueue.scala:58:7]
  assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
  // Three-stage synchronizer shift register (width 1, init 0).
  AsyncResetSynchronizerShiftReg_w1_d3_i0_79 io_out_sink_valid_0 ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (reset),
    .io_q (_io_out_WIRE)
  ); // @[ShiftReg.scala:45:23]
  assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File INToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import consts._
/** Convert an integer to HardFloat's recoded floating-point format.
 *
 * The integer is first reinterpreted as a raw float via [[rawFloatFromIN]]
 * (exact, never NaN/Inf), then rounded to the target (expWidth, sigWidth)
 * format by [[RoundAnyRawFNToRecFN]]. Invalid/infinite exception inputs are
 * tied off because an integer input can raise neither.
 *
 * @param intWidth width of the integer input
 * @param expWidth exponent width of the recoded output
 * @param sigWidth significand width of the recoded output
 */
class INToRecFN(intWidth: Int, expWidth: Int, sigWidth: Int) extends RawModule
{
    override def desiredName = s"INToRecFN_i${intWidth}_e${expWidth}_s${sigWidth}"
    val io = IO(new Bundle {
        val signedIn = Input(Bool())
        val in = Input(Bits(intWidth.W))
        val roundingMode = Input(UInt(3.W))
        val detectTininess = Input(UInt(1.W))
        val out = Output(Bits((expWidth + sigWidth + 1).W))
        val exceptionFlags = Output(Bits(5.W))
    })

    //------------------------------------------------------------------------
    //------------------------------------------------------------------------
    val intAsRawFloat = rawFloatFromIN(io.signedIn, io.in);

    val roundAnyRawFNToRecFN =
        Module(
            new RoundAnyRawFNToRecFN(
                intAsRawFloat.expWidth,
                intWidth,
                expWidth,
                sigWidth,
                flRoundOpt_sigMSBitAlwaysZero | flRoundOpt_neverUnderflows
            ))
    // Integer inputs can never signal invalid or infinite.
    roundAnyRawFNToRecFN.io.invalidExc := false.B
    roundAnyRawFNToRecFN.io.infiniteExc := false.B
    roundAnyRawFNToRecFN.io.in := intAsRawFloat
    roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
    roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
    io.out := roundAnyRawFNToRecFN.io.out
    io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
File rawFloatFromIN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
object rawFloatFromIN
{
    /** Reinterpret an integer as a RawFloat, exactly (no rounding here).
     *
     * Takes the absolute value (when signed and negative), zero-extends it to
     * a power-of-two width, normalizes by the leading-zero count, and encodes
     * the result. The output is never NaN or infinity; isZero is derived from
     * the normalized significand's MSB.
     */
    def apply(signedIn: Bool, in: Bits): RawFloat =
    {
        val expWidth = log2Up(in.getWidth) + 1
//*** CHANGE THIS; CAN BE VERY LARGE:
        val extIntWidth = 1<<(expWidth - 1)

        // Sign is meaningful only when the input is treated as signed.
        val sign = signedIn && in(in.getWidth - 1)
        val absIn = Mux(sign, -in.asUInt, in.asUInt)
        // Zero-extend to extIntWidth so the shift below cannot overflow.
        val extAbsIn = (0.U(extIntWidth.W) ## absIn)(extIntWidth - 1, 0)
        val adjustedNormDist = countLeadingZeros(extAbsIn)
        // Left-align the magnitude; keep the top in.getWidth bits.
        val sig =
            (extAbsIn<<adjustedNormDist)(
                extIntWidth - 1, extIntWidth - in.getWidth)

        val out = Wire(new RawFloat(expWidth, in.getWidth))
        out.isNaN := false.B
        out.isInf := false.B
        out.isZero := ! sig(in.getWidth - 1)
        out.sign := sign
        // Biased exponent from the (inverted) normalization distance.
        out.sExp := (2.U(2.W) ## ~adjustedNormDist(expWidth - 2, 0)).zext
        out.sig := sig
        out
    }
}
| module INToRecFN_i32_e8_s24_5( // @[INToRecFN.scala:43:7]
  // Generated specialization of INToRecFN for a signed 32-bit input
  // (io_signedIn tied to 1, roundingMode tied to 0, detectTininess tied to 1)
  // producing a 33-bit recoded single-precision value.
  input [31:0] io_in, // @[INToRecFN.scala:46:16]
  output [32:0] io_out // @[INToRecFN.scala:46:16]
);

  wire [31:0] io_in_0 = io_in; // @[INToRecFN.scala:43:7]
  wire intAsRawFloat_isNaN = 1'h0; // @[rawFloatFromIN.scala:59:23]
  wire intAsRawFloat_isInf = 1'h0; // @[rawFloatFromIN.scala:59:23]
  wire [2:0] io_roundingMode = 3'h0; // @[INToRecFN.scala:43:7, :46:16, :60:15]
  wire io_signedIn = 1'h1; // @[INToRecFN.scala:43:7]
  wire io_detectTininess = 1'h1; // @[INToRecFN.scala:43:7]
  wire [32:0] io_out_0; // @[INToRecFN.scala:43:7]
  wire [4:0] io_exceptionFlags; // @[INToRecFN.scala:43:7]
  // Sign and absolute value of the input (two's-complement negate when negative).
  wire _intAsRawFloat_sign_T = io_in_0[31]; // @[rawFloatFromIN.scala:51:34]
  wire intAsRawFloat_sign = _intAsRawFloat_sign_T; // @[rawFloatFromIN.scala:51:{29,34}]
  wire intAsRawFloat_sign_0 = intAsRawFloat_sign; // @[rawFloatFromIN.scala:51:29, :59:23]
  wire [32:0] _intAsRawFloat_absIn_T = 33'h0 - {1'h0, io_in_0}; // @[rawFloatFromIN.scala:52:31]
  wire [31:0] _intAsRawFloat_absIn_T_1 = _intAsRawFloat_absIn_T[31:0]; // @[rawFloatFromIN.scala:52:31]
  wire [31:0] intAsRawFloat_absIn = intAsRawFloat_sign ? _intAsRawFloat_absIn_T_1 : io_in_0; // @[rawFloatFromIN.scala:51:29, :52:{24,31}]
  wire [63:0] _intAsRawFloat_extAbsIn_T = {32'h0, intAsRawFloat_absIn}; // @[rawFloatFromIN.scala:52:24, :53:44]
  wire [31:0] intAsRawFloat_extAbsIn = _intAsRawFloat_extAbsIn_T[31:0]; // @[rawFloatFromIN.scala:53:{44,53}]
  // Individual bit taps feeding the leading-zero-count priority mux below.
  wire _intAsRawFloat_adjustedNormDist_T = intAsRawFloat_extAbsIn[0]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_1 = intAsRawFloat_extAbsIn[1]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_2 = intAsRawFloat_extAbsIn[2]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_3 = intAsRawFloat_extAbsIn[3]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_4 = intAsRawFloat_extAbsIn[4]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_5 = intAsRawFloat_extAbsIn[5]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_6 = intAsRawFloat_extAbsIn[6]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_7 = intAsRawFloat_extAbsIn[7]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_8 = intAsRawFloat_extAbsIn[8]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_9 = intAsRawFloat_extAbsIn[9]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_10 = intAsRawFloat_extAbsIn[10]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_11 = intAsRawFloat_extAbsIn[11]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_12 = intAsRawFloat_extAbsIn[12]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_13 = intAsRawFloat_extAbsIn[13]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_14 = intAsRawFloat_extAbsIn[14]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_15 = intAsRawFloat_extAbsIn[15]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_16 = intAsRawFloat_extAbsIn[16]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_17 = intAsRawFloat_extAbsIn[17]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_18 = intAsRawFloat_extAbsIn[18]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_19 = intAsRawFloat_extAbsIn[19]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_20 = intAsRawFloat_extAbsIn[20]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_21 = intAsRawFloat_extAbsIn[21]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_22 = intAsRawFloat_extAbsIn[22]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_23 = intAsRawFloat_extAbsIn[23]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_24 = intAsRawFloat_extAbsIn[24]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_25 = intAsRawFloat_extAbsIn[25]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_26 = intAsRawFloat_extAbsIn[26]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_27 = intAsRawFloat_extAbsIn[27]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_28 = intAsRawFloat_extAbsIn[28]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_29 = intAsRawFloat_extAbsIn[29]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_30 = intAsRawFloat_extAbsIn[30]; // @[rawFloatFromIN.scala:53:53]
  wire _intAsRawFloat_adjustedNormDist_T_31 = intAsRawFloat_extAbsIn[31]; // @[rawFloatFromIN.scala:53:53]
  // Priority-mux chain: highest set bit wins, yielding the leading-zero count.
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_32 = {4'hF, ~_intAsRawFloat_adjustedNormDist_T_1}; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_33 = _intAsRawFloat_adjustedNormDist_T_2 ? 5'h1D : _intAsRawFloat_adjustedNormDist_T_32; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_34 = _intAsRawFloat_adjustedNormDist_T_3 ? 5'h1C : _intAsRawFloat_adjustedNormDist_T_33; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_35 = _intAsRawFloat_adjustedNormDist_T_4 ? 5'h1B : _intAsRawFloat_adjustedNormDist_T_34; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_36 = _intAsRawFloat_adjustedNormDist_T_5 ? 5'h1A : _intAsRawFloat_adjustedNormDist_T_35; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_37 = _intAsRawFloat_adjustedNormDist_T_6 ? 5'h19 : _intAsRawFloat_adjustedNormDist_T_36; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_38 = _intAsRawFloat_adjustedNormDist_T_7 ? 5'h18 : _intAsRawFloat_adjustedNormDist_T_37; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_39 = _intAsRawFloat_adjustedNormDist_T_8 ? 5'h17 : _intAsRawFloat_adjustedNormDist_T_38; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_40 = _intAsRawFloat_adjustedNormDist_T_9 ? 5'h16 : _intAsRawFloat_adjustedNormDist_T_39; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_41 = _intAsRawFloat_adjustedNormDist_T_10 ? 5'h15 : _intAsRawFloat_adjustedNormDist_T_40; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_42 = _intAsRawFloat_adjustedNormDist_T_11 ? 5'h14 : _intAsRawFloat_adjustedNormDist_T_41; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_43 = _intAsRawFloat_adjustedNormDist_T_12 ? 5'h13 : _intAsRawFloat_adjustedNormDist_T_42; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_44 = _intAsRawFloat_adjustedNormDist_T_13 ? 5'h12 : _intAsRawFloat_adjustedNormDist_T_43; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_45 = _intAsRawFloat_adjustedNormDist_T_14 ? 5'h11 : _intAsRawFloat_adjustedNormDist_T_44; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_46 = _intAsRawFloat_adjustedNormDist_T_15 ? 5'h10 : _intAsRawFloat_adjustedNormDist_T_45; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_47 = _intAsRawFloat_adjustedNormDist_T_16 ? 5'hF : _intAsRawFloat_adjustedNormDist_T_46; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_48 = _intAsRawFloat_adjustedNormDist_T_17 ? 5'hE : _intAsRawFloat_adjustedNormDist_T_47; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_49 = _intAsRawFloat_adjustedNormDist_T_18 ? 5'hD : _intAsRawFloat_adjustedNormDist_T_48; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_50 = _intAsRawFloat_adjustedNormDist_T_19 ? 5'hC : _intAsRawFloat_adjustedNormDist_T_49; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_51 = _intAsRawFloat_adjustedNormDist_T_20 ? 5'hB : _intAsRawFloat_adjustedNormDist_T_50; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_52 = _intAsRawFloat_adjustedNormDist_T_21 ? 5'hA : _intAsRawFloat_adjustedNormDist_T_51; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_53 = _intAsRawFloat_adjustedNormDist_T_22 ? 5'h9 : _intAsRawFloat_adjustedNormDist_T_52; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_54 = _intAsRawFloat_adjustedNormDist_T_23 ? 5'h8 : _intAsRawFloat_adjustedNormDist_T_53; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_55 = _intAsRawFloat_adjustedNormDist_T_24 ? 5'h7 : _intAsRawFloat_adjustedNormDist_T_54; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_56 = _intAsRawFloat_adjustedNormDist_T_25 ? 5'h6 : _intAsRawFloat_adjustedNormDist_T_55; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_57 = _intAsRawFloat_adjustedNormDist_T_26 ? 5'h5 : _intAsRawFloat_adjustedNormDist_T_56; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_58 = _intAsRawFloat_adjustedNormDist_T_27 ? 5'h4 : _intAsRawFloat_adjustedNormDist_T_57; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_59 = _intAsRawFloat_adjustedNormDist_T_28 ? 5'h3 : _intAsRawFloat_adjustedNormDist_T_58; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_60 = _intAsRawFloat_adjustedNormDist_T_29 ? 5'h2 : _intAsRawFloat_adjustedNormDist_T_59; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_adjustedNormDist_T_61 = _intAsRawFloat_adjustedNormDist_T_30 ? 5'h1 : _intAsRawFloat_adjustedNormDist_T_60; // @[Mux.scala:50:70]
  wire [4:0] intAsRawFloat_adjustedNormDist = _intAsRawFloat_adjustedNormDist_T_31 ? 5'h0 : _intAsRawFloat_adjustedNormDist_T_61; // @[Mux.scala:50:70]
  wire [4:0] _intAsRawFloat_out_sExp_T = intAsRawFloat_adjustedNormDist; // @[Mux.scala:50:70]
  // Left-shift by the normalization distance to align the leading one.
  wire [62:0] _intAsRawFloat_sig_T = {31'h0, intAsRawFloat_extAbsIn} << intAsRawFloat_adjustedNormDist; // @[Mux.scala:50:70]
  wire [31:0] intAsRawFloat_sig = _intAsRawFloat_sig_T[31:0]; // @[rawFloatFromIN.scala:56:{22,41}]
  wire _intAsRawFloat_out_isZero_T_1; // @[rawFloatFromIN.scala:62:23]
  wire [7:0] _intAsRawFloat_out_sExp_T_3; // @[rawFloatFromIN.scala:64:72]
  wire intAsRawFloat_isZero; // @[rawFloatFromIN.scala:59:23]
  wire [7:0] intAsRawFloat_sExp; // @[rawFloatFromIN.scala:59:23]
  wire [32:0] intAsRawFloat_sig_0; // @[rawFloatFromIN.scala:59:23]
  // Zero iff the normalized significand's MSB is clear.
  wire _intAsRawFloat_out_isZero_T = intAsRawFloat_sig[31]; // @[rawFloatFromIN.scala:56:41, :62:28]
  assign _intAsRawFloat_out_isZero_T_1 = ~_intAsRawFloat_out_isZero_T; // @[rawFloatFromIN.scala:62:{23,28}]
  assign intAsRawFloat_isZero = _intAsRawFloat_out_isZero_T_1; // @[rawFloatFromIN.scala:59:23, :62:23]
  // sExp = {2'b10, ~normDist}, zero-extended to 8 bits.
  wire [4:0] _intAsRawFloat_out_sExp_T_1 = ~_intAsRawFloat_out_sExp_T; // @[rawFloatFromIN.scala:64:{36,53}]
  wire [6:0] _intAsRawFloat_out_sExp_T_2 = {2'h2, _intAsRawFloat_out_sExp_T_1}; // @[rawFloatFromIN.scala:64:{33,36}]
  assign _intAsRawFloat_out_sExp_T_3 = {1'h0, _intAsRawFloat_out_sExp_T_2}; // @[rawFloatFromIN.scala:64:{33,72}]
  assign intAsRawFloat_sExp = _intAsRawFloat_out_sExp_T_3; // @[rawFloatFromIN.scala:59:23, :64:72]
  assign intAsRawFloat_sig_0 = {1'h0, intAsRawFloat_sig}; // @[rawFloatFromIN.scala:56:41, :59:23, :65:20]
  // Round the exact raw float into the 33-bit recoded output.
  RoundAnyRawFNToRecFN_ie6_is32_oe8_os24_5 roundAnyRawFNToRecFN ( // @[INToRecFN.scala:60:15]
    .io_in_isZero (intAsRawFloat_isZero), // @[rawFloatFromIN.scala:59:23]
    .io_in_sign (intAsRawFloat_sign_0), // @[rawFloatFromIN.scala:59:23]
    .io_in_sExp (intAsRawFloat_sExp), // @[rawFloatFromIN.scala:59:23]
    .io_in_sig (intAsRawFloat_sig_0), // @[rawFloatFromIN.scala:59:23]
    .io_out (io_out_0),
    .io_exceptionFlags (io_exceptionFlags)
  ); // @[INToRecFN.scala:60:15]
  assign io_out = io_out_0; // @[INToRecFN.scala:43:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Checksum.scala:
package icenet
import chisel3._
import chisel3.util._
import freechips.rocketchip.unittest.UnitTest
import freechips.rocketchip.util.{DecoupledHelper, UIntIsOneOf}
import IceNetConsts._
import NetworkHelpers._
/** Request to compute a checksum over a stream.
 *  check - actually compute the checksum (otherwise just pass data through)
 *  start - byte offset at which summing begins
 *  init  - initial checksum accumulator value
 */
class ChecksumCalcRequest extends Bundle {
  val check = Bool()
  val start = UInt(16.W)
  val init = UInt(16.W)
}
/** Request to compute a checksum and write it back into the stream.
 *  check  - actually compute/rewrite the checksum
 *  offset - byte offset at which the computed checksum is written
 *  start  - byte offset at which summing begins
 *  init   - initial checksum accumulator value
 */
class ChecksumRewriteRequest extends Bundle {
  val check = Bool()
  val offset = UInt(16.W)
  val start = UInt(16.W)
  val init = UInt(16.W)
}
/** Streams data from io.stream.in to io.stream.out unchanged while
  * accumulating a ones'-complement style (Internet) checksum of the kept
  * bytes at/after `start`. After the last flit, the wide accumulator is
  * folded (carries added back in) and the complemented low 16 bits are
  * presented on io.result.
  */
class ChecksumCalc(dataBits: Int) extends Module {
  val dataBytes = dataBits / 8
  val io = IO(new Bundle {
    val req = Flipped(Decoupled(new ChecksumCalcRequest))
    val stream = new StreamIO(dataBits)
    val result = Decoupled(UInt(16.W))
  })
  // Accumulator has 16 bits of headroom: each flit is added as one wide
  // integer and the carries are folded only once, in state s_fold.
  val csum = Reg(UInt((dataBits + 16).W))
  val check = Reg(Bool())
  val start = Reg(UInt(16.W))
  // Byte offset of the current flit's first byte within the stream.
  val startPos = Reg(UInt(16.W))
  val nextStartPos = startPos + dataBytes.U
  // Per-byte enable: the byte must be kept AND lie at/after `start`.
  val sumMask = (0 until dataBytes).map { i =>
    io.stream.in.bits.keep(i) && (startPos + i.U) >= start
  }
  val sumData = io.stream.in.bits.data & FillInterleaved(8, sumMask)
  val s_req :: s_stream :: s_fold :: s_result :: Nil = Enum(4)
  val state = RegInit(s_req)
  io.req.ready := state === s_req
  // Pure pass-through while streaming: in/out handshakes tied together.
  io.stream.out.valid := state === s_stream && io.stream.in.valid
  io.stream.in.ready := state === s_stream && io.stream.out.ready
  io.stream.out.bits := io.stream.in.bits
  io.result.valid := state === s_result
  io.result.bits := csum(15, 0)
  when (io.req.fire) {
    check := io.req.bits.check
    start := io.req.bits.start
    csum := io.req.bits.init
    startPos := 0.U
    state := s_stream
  }
  when (io.stream.in.fire) {
    when (check) {
      csum := csum + sumData
      startPos := nextStartPos
    }
    // On the last flit: fold the accumulator if checksumming, else idle.
    when (io.stream.in.bits.last) {
      state := Mux(check, s_fold, s_req)
    }
  }
  when (state === s_fold) {
    val upper = csum(15 + dataBits, 16)
    val lower = csum(15, 0)
    // Repeatedly add carries back into the low 16 bits; once none remain,
    // complement to produce the final checksum.
    when (upper === 0.U) {
      csum := ~lower
      state := s_result
    } .otherwise {
      csum := upper + lower
    }
  }
  when (io.result.fire) { state := s_req }
}
/** Computes a checksum over the stream (via ChecksumCalc) and rewrites the
  * 16-bit word at byte offset `req.offset` with the result.
  *
  * Flits are buffered in `buffer` (which must be able to hold the packet
  * up to the checksum word); flits strictly before the offset word may
  * drain early, the rest are held until the result has been latched.
  */
class ChecksumRewrite(dataBits: Int, nBufFlits: Int) extends Module {
  val dataBytes = dataBits / 8
  val io = IO(new Bundle {
    val req = Flipped(Decoupled(new ChecksumRewriteRequest))
    val stream = new StreamIO(dataBits)
  })
  val reqq = Module(new Queue(new ChecksumRewriteRequest, 2))
  val calc = Module(new ChecksumCalc(dataBits))
  val buffer = Module(new Queue(new StreamChannel(dataBits), nBufFlits))
  val offset = Reg(UInt(16.W)) // byte offset of the word to rewrite
  val check = Reg(Bool())
  val csum = Reg(UInt(16.W))   // result latched from calc.io.result
  // Fan the request out to both the calculator and the local request
  // queue; nothing fires until all three endpoints are ready.
  val reqHelper = DecoupledHelper(
    io.req.valid,
    calc.io.req.ready,
    reqq.io.enq.ready)
  io.req.ready := reqHelper.fire(io.req.valid)
  calc.io.req.valid := reqHelper.fire(calc.io.req.ready)
  calc.io.req.bits.check := io.req.bits.check
  calc.io.req.bits.start := io.req.bits.start
  calc.io.req.bits.init := io.req.bits.init
  reqq.io.enq.valid := reqHelper.fire(reqq.io.enq.ready)
  reqq.io.enq.bits := io.req.bits
  calc.io.stream.in <> io.stream.in
  buffer.io.enq <> calc.io.stream.out
  val byteOffBits = log2Ceil(dataBytes)
  // Byte offset of the current *output* flit within the stream.
  val startPos = Reg(UInt(16.W))
  val nextStartPos = startPos + dataBytes.U
  val baseData = buffer.io.deq.bits.data
  // Bit position of the checksum field within the current flit
  // (byte offset within flit, times 8).
  val shiftAmt = Cat((offset - startPos)(byteOffBits-1, 0), 0.U(3.W))
  val dataMask = ~(~0.U(16.W) << shiftAmt)
  val csumShifted = csum << shiftAmt
  // Substitute only when checksumming and the offset word is in this flit.
  val replace = check && (offset >= startPos) && (offset < nextStartPos)
  val outData = Mux(replace, (baseData & dataMask) | csumShifted, baseData)
  val s_req :: s_wait :: s_flush :: Nil = Enum(3)
  val state = RegInit(s_req)
  when (reqq.io.deq.fire) {
    check := reqq.io.deq.bits.check
    offset := reqq.io.deq.bits.offset
    startPos := 0.U
    // No checksum requested -> nothing to wait for; flush immediately.
    state := Mux(reqq.io.deq.bits.check, s_wait, s_flush)
  }
  when (calc.io.result.fire) {
    csum := calc.io.result.bits
    state := s_flush
  }
  when (io.stream.out.fire) {
    startPos := nextStartPos
    when (io.stream.out.bits.last) { state := s_req }
  }
  // Dequeue gating: flits entirely before the checksum word may drain
  // early; otherwise wait until the result is known (s_flush).
  val deqOK = (state === s_flush || nextStartPos <= offset)
  reqq.io.deq.ready := state === s_req
  calc.io.result.ready := state === s_wait
  io.stream.out.valid := buffer.io.deq.valid && deqOK
  buffer.io.deq.ready := io.stream.out.ready && deqOK
  io.stream.out.bits := buffer.io.deq.bits
  io.stream.out.bits.data := outData
}
/** Unit test for ChecksumRewrite: computes the expected checksum with a
  * software reference model at elaboration time, streams the packet
  * through the rewriter, and asserts the output equals the input with the
  * checksum word substituted at `offset`.
  */
class ChecksumTest extends UnitTest {
  val offset = 6    // byte offset of the checksum field to rewrite
  val init = 0x4315 // initial accumulator value
  val start = 2     // byte offset where summing begins
  val data = Seq(0xdead, 0xbeef, 0x7432, 0x0000, 0xf00d, 0x3163, 0x9821, 0x1543)
  val take = Seq(true, true, true, true, true, true, true, false)
  // 2-bit keep per 16-bit short: 0x3 = both bytes kept, 0x0 = dropped.
  val keep = take.map(if (_) 0x3 else 0x0)
  // Software reference: sum the taken shorts from `start`, fold carries,
  // then complement (matches ChecksumCalc's s_fold behavior).
  var csum = init + data.zip(take).drop(start/2)
    .filter(_._2).map(_._1).reduce(_ + _)
  while (csum > 0xffff) {
    csum = (csum >> 16) + (csum & 0xffff)
  }
  csum = ~csum & 0xffff
  // Expected output: original data with the short at `offset` replaced.
  val expected = data.take(offset/2) ++
    Seq(csum) ++ data.drop(offset/2+1)
  // Pack `step` consecutive nbits-wide values into each flit, first value
  // in the low-order bits.
  def seqToVec(seq: Seq[Int], step: Int, nbits: Int) = {
    VecInit((0 until seq.length by step).map { i =>
      Cat((i until (i + step)).map(seq(_).U(nbits.W)).reverse)
    })
  }
  val dataBits = 32
  val dataBytes = dataBits / 8
  val shortsPerFlit = dataBits / 16
  val dataVec = seqToVec(data, shortsPerFlit, 16)
  val expectedVec = seqToVec(expected, shortsPerFlit, 16)
  val keepVec = seqToVec(keep, shortsPerFlit, 2)
  val s_start :: s_req :: s_input :: s_output :: s_done :: Nil = Enum(5)
  val state = RegInit(s_start)
  val rewriter = Module(new ChecksumRewrite(
    dataBits, data.length/shortsPerFlit))
  val (inIdx, inDone) = Counter(rewriter.io.stream.in.fire, dataVec.length)
  val (outIdx, outDone) = Counter(rewriter.io.stream.out.fire, expectedVec.length)
  rewriter.io.req.valid := state === s_req
  rewriter.io.req.bits.check := true.B
  rewriter.io.req.bits.start := start.U
  rewriter.io.req.bits.init := init.U
  rewriter.io.req.bits.offset := offset.U
  rewriter.io.stream.in.valid := state === s_input
  rewriter.io.stream.in.bits.data := dataVec(inIdx)
  rewriter.io.stream.in.bits.keep := keepVec(inIdx)
  rewriter.io.stream.in.bits.last := inIdx === (dataVec.length-1).U
  rewriter.io.stream.out.ready := state === s_output
  io.finished := state === s_done
  when (state === s_start && io.start) { state := s_req }
  when (rewriter.io.req.fire) { state := s_input }
  when (inDone) { state := s_output }
  when (outDone) { state := s_done }
  // Check each output flit against the software model as it appears.
  assert(!rewriter.io.stream.out.valid ||
    rewriter.io.stream.out.bits.data === expectedVec(outIdx),
    "ChecksumTest: got wrong data")
  assert(!rewriter.io.stream.out.valid ||
    rewriter.io.stream.out.bits.keep === keepVec(outIdx),
    "ChecksumTest: got wrong keep")
}
/** Result of a TCP checksum offload verification.
  *
  *  correct - the folded checksum compared equal to zero (i.e. valid);
  *            only meaningful when `checked` is set
  *  checked - the packet was recognized as IPv4/TCP and actually verified
  */
class TCPChecksumOffloadResult extends Bundle {
  val correct = Bool()
  val checked = Bool()
}
/** Verifies the TCP checksum of an incoming Ethernet/IPv4/TCP packet.
  *
  * Captures the packet headers, builds the TCP pseudo-header, feeds the
  * pseudo-header followed by the remaining packet body through a
  * ChecksumCalc, and reports whether the folded sum is zero. Packets that
  * are not plain IPv4/TCP are passed through with result.checked = false.
  */
class TCPChecksumOffload(dataBits: Int) extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(new StreamChannel(dataBits)))
    val result = Decoupled(new TCPChecksumOffloadResult)
  })
  // Concatenated on-the-wire headers. Bundle fields map MSB-first, so
  // `eth` (listed last) occupies the low-order bits, i.e. the first words
  // received into headerVec.
  class FullHeader extends Bundle {
    val tcp = new TCPHeader
    val ipv4 = new IPv4Header
    val eth = new EthernetHeader
  }
  // TCP pseudo-header: source/dest IP, zeros, protocol, TCP length,
  // followed by the TCP header itself.
  class PseudoHeader extends Bundle {
    val tcp = new TCPHeader
    val length = UInt(16.W)
    val protocol = UInt(8.W)
    val zeros = UInt(8.W)
    val dest_ip = UInt(32.W)
    val source_ip = UInt(32.W)
  }
  val dataBytes = dataBits/8
  val headerBytes = ETH_HEAD_BYTES + IPV4_HEAD_BYTES + TCP_HEAD_BYTES
  val headerWords = headerBytes / dataBytes
  val headerVec = Reg(Vec(headerWords, UInt(dataBits.W)))
  val header = headerVec.asTypeOf(new FullHeader)
  val headerIdx = RegInit(0.U(log2Ceil(headerWords).W))
  val pseudoHeaderBytes = 12 + TCP_HEAD_BYTES
  val pseudoHeaderWords = pseudoHeaderBytes / dataBytes
  val pseudoHeader = Wire(new PseudoHeader)
  val pseudoHeaderVec = pseudoHeader.asTypeOf(
    Vec(pseudoHeaderWords, UInt(dataBits.W)))
  pseudoHeader.tcp := header.tcp
  // TCP segment length = IPv4 total length minus the (fixed, ihl == 5)
  // IPv4 header; converted back to network byte order.
  pseudoHeader.length := htons(ntohs(header.ipv4.total_length) - IPV4_HEAD_BYTES.U)
  pseudoHeader.protocol := header.ipv4.protocol
  pseudoHeader.zeros := 0.U
  pseudoHeader.dest_ip := header.ipv4.dest_ip
  pseudoHeader.source_ip := header.ipv4.source_ip
  require(dataBits >= 16)
  require(headerBytes % dataBytes == 0)
  require(pseudoHeaderBytes % dataBytes == 0)
  val (s_header_in :: s_csum_req ::
       s_header_csum :: s_body_csum ::
       s_passthru :: s_result :: Nil) = Enum(6)
  val state = RegInit(s_header_in)
  // Only checksum IPv4/TCP packets without IP options (ihl == 5).
  val headerOK =
    header.eth.ethType === IPV4_ETHTYPE.U &&
    header.ipv4.protocol === TCP_PROTOCOL.U &&
    header.ipv4.ihl === 5.U
  // Synthetic flit carrying one word of the pseudo-header into the calc.
  val headerChannel = Wire(new StreamChannel(dataBits))
  headerChannel.data := pseudoHeaderVec(headerIdx)
  headerChannel.keep := ~0.U(dataBits.W)
  headerChannel.last := false.B
  val resultExpected = RegInit(false.B)
  val csum = Module(new ChecksumCalc(dataBits))
  csum.io.req.valid := state === s_csum_req
  csum.io.req.bits.check := true.B
  csum.io.req.bits.start := 0.U
  csum.io.req.bits.init := 0.U
  // Feed pseudo-header words first, then the remaining input stream.
  csum.io.stream.in.valid :=
    (state === s_header_csum) || (state === s_body_csum && io.in.valid)
  csum.io.stream.in.bits := Mux(
    state === s_header_csum, headerChannel, io.in.bits)
  csum.io.stream.out.ready := true.B // just ignore output
  io.in.ready := state.isOneOf(s_header_in, s_passthru) ||
    (state === s_body_csum && csum.io.stream.in.ready)
  // Unchecked packets produce a result without waiting on the calculator.
  io.result.valid := state === s_result && (!resultExpected || csum.io.result.valid)
  // With the packet's own checksum field included in the sum, a correct
  // packet makes the complemented fold come out zero.
  io.result.bits.correct := csum.io.result.bits === 0.U
  io.result.bits.checked := resultExpected
  csum.io.result.ready := state === s_result && resultExpected && io.result.ready
  when (io.in.fire) {
    when (io.in.bits.last) {
      state := s_result
    } .elsewhen (state === s_header_in) {
      // Still capturing headers; decide checked-vs-passthru on last word.
      headerVec(headerIdx) := io.in.bits.data
      headerIdx := headerIdx + 1.U
      when (headerIdx === (headerWords-1).U) {
        resultExpected := headerOK
        state := Mux(headerOK, s_csum_req, s_passthru)
      }
    }
  }
  when (csum.io.req.fire) {
    headerIdx := 0.U
    state := s_header_csum
  }
  // Stream the pseudo-header words into the calculator.
  when (state === s_header_csum && csum.io.stream.in.ready) {
    headerIdx := headerIdx + 1.U
    when (headerIdx === (pseudoHeaderWords-1).U) {
      state := s_body_csum
    }
  }
  when (io.result.fire) {
    headerIdx := 0.U
    resultExpected := false.B
    state := s_header_in
  }
}
/** Unit test for TCPChecksumOffload: feeds one IPv4/TCP packet (ethType
  * 08 00) and one non-IPv4 packet (ethType 08 06, ARP) and checks the
  * {correct, checked} result bits for each.
  */
class ChecksumTCPVerify extends UnitTest {
  // Raw packet bytes in hex, exactly as they appear on the wire.
  val packets = Seq("""00 00 00 12 6d 00 00 03 00 12 6d 00 00 02 08 00
                      |45 00 00 3c 45 9b 40 00 40 06 9c fb ac 10 00 02
                      |ac 10 00 03 c2 a8 14 51 22 ad 9e d6 00 00 00 00
                      |a0 02 72 10 bf 07 00 00 02 04 05 b4 04 02 08 0a
                      |43 ec e2 58 00 00 00 00 01 03 03 07 00 00 00 00""",
                    """00 00 00 12 6d 00 00 02 00 12 6d 00 00 03 08 06
                      |00 01 08 00 06 04 00 02 00 12 6d 00 00 03 ac 10
                      |00 03 00 12 6d 00 00 02 ac 10 00 02 00 02 00 00""")
  // Parse each packet string into its byte values.
  val dataBytes = packets.map(packetStr =>
    packetStr.stripMargin.replaceAll("\n", " ")
      .split(" ").map(BigInt(_, 16)))
  // Pack bytes into 64-bit flits, first byte in the low-order lane, and
  // mark the final flit of each packet.
  val dataWords = VecInit(dataBytes.flatMap(
    bytes => (0 until bytes.length by 8).map(
      i => Cat(bytes.slice(i, i + 8).map(_.U(8.W)).reverse))))
  val dataLast = VecInit(dataBytes.map(_.length / 8).flatMap(
    nWords => Seq.fill(nWords-1)(false.B) :+ true.B))
  // Expected result.asUInt per packet: 3 for the verified TCP packet,
  // 2 for the unchecked ARP packet.
  val expectedResults = VecInit(3.U, 2.U)
  val started = RegInit(false.B)
  val inputValid = RegInit(false.B)
  val outputReady = RegInit(false.B)
  val resultReady = RegInit(false.B)
  val offload = Module(new TCPChecksumOffload(NET_IF_WIDTH))
  val (inIdx, inDone) = Counter(offload.io.in.fire, dataWords.length)
  val (resultIdx, resultDone) = Counter(offload.io.result.fire, expectedResults.length)
  offload.io.in.valid := inputValid
  offload.io.in.bits.data := dataWords(inIdx)
  offload.io.in.bits.keep := NET_FULL_KEEP
  offload.io.in.bits.last := dataLast(inIdx)
  offload.io.result.ready := resultReady
  io.finished := started && !inputValid && !resultReady
  when (!started && io.start) {
    started := true.B
    inputValid := true.B
    resultReady := true.B
  }
  when (inDone) { inputValid := false.B }
  when (resultDone) { resultReady := false.B }
  assert(!offload.io.result.valid ||
    offload.io.result.bits.asUInt === expectedResults(resultIdx),
    "ChecksumTCPVerify: checksum was not correct")
}
| module ChecksumCalc_1( // @[Checksum.scala:23:7]
  input         clock, // @[Checksum.scala:23:7]
  input         reset, // @[Checksum.scala:23:7]
  output        io_req_ready, // @[Checksum.scala:26:14]
  input         io_req_valid, // @[Checksum.scala:26:14]
  output        io_stream_in_ready, // @[Checksum.scala:26:14]
  input         io_stream_in_valid, // @[Checksum.scala:26:14]
  input  [63:0] io_stream_in_bits_data, // @[Checksum.scala:26:14]
  input  [7:0]  io_stream_in_bits_keep, // @[Checksum.scala:26:14]
  input         io_stream_in_bits_last, // @[Checksum.scala:26:14]
  input         io_result_ready, // @[Checksum.scala:26:14]
  output        io_result_valid, // @[Checksum.scala:26:14]
  output [15:0] io_result_bits // @[Checksum.scala:26:14]
);

  // 64-bit ChecksumCalc specialization (check=1, start=0, init=0):
  // accumulates the kept input bytes into the 80-bit `csum` register while
  // streaming, folds the carries after the last flit (state 2'h2), and
  // presents the complemented low 16 bits on io_result (state 2'h3).
  // FIX: the original text closed the sequential block with the bare line
  // `always @(posedge)` -- the `end` keyword was missing, leaving the
  // `begin` of the always block unclosed (a syntax error). Restored as
  // `end // always @(posedge)` below.

  wire        io_req_valid_0 = io_req_valid; // @[Checksum.scala:23:7]
  wire        io_stream_in_valid_0 = io_stream_in_valid; // @[Checksum.scala:23:7]
  wire [63:0] io_stream_in_bits_data_0 = io_stream_in_bits_data; // @[Checksum.scala:23:7]
  wire [7:0]  io_stream_in_bits_keep_0 = io_stream_in_bits_keep; // @[Checksum.scala:23:7]
  wire        io_stream_in_bits_last_0 = io_stream_in_bits_last; // @[Checksum.scala:23:7]
  wire        io_result_ready_0 = io_result_ready; // @[Checksum.scala:23:7]
  wire [15:0] io_req_bits_start = 16'h0; // @[Checksum.scala:23:7]
  wire [15:0] io_req_bits_init = 16'h0; // @[Checksum.scala:23:7]
  wire        io_req_bits_check = 1'h1; // @[Checksum.scala:23:7]
  wire        io_stream_out_ready = 1'h1; // @[Checksum.scala:23:7]
  wire        _sumMask_T_3 = 1'h1; // @[Checksum.scala:38:51]
  wire        _sumMask_T_7 = 1'h1; // @[Checksum.scala:38:51]
  wire        _sumMask_T_11 = 1'h1; // @[Checksum.scala:38:51]
  wire        _sumMask_T_15 = 1'h1; // @[Checksum.scala:38:51]
  wire        _sumMask_T_19 = 1'h1; // @[Checksum.scala:38:51]
  wire        _sumMask_T_23 = 1'h1; // @[Checksum.scala:38:51]
  wire        _sumMask_T_27 = 1'h1; // @[Checksum.scala:38:51]
  wire        _sumMask_T_31 = 1'h1; // @[Checksum.scala:38:51]
  wire        _io_req_ready_T; // @[Checksum.scala:45:25]
  wire [1:0]  _state_T = 2'h2; // @[Checksum.scala:67:19]
  wire        _io_stream_in_ready_T_1; // @[Checksum.scala:47:44]
  wire [63:0] io_stream_out_bits_data = io_stream_in_bits_data_0; // @[Checksum.scala:23:7]
  wire [7:0]  io_stream_out_bits_keep = io_stream_in_bits_keep_0; // @[Checksum.scala:23:7]
  wire        io_stream_out_bits_last = io_stream_in_bits_last_0; // @[Checksum.scala:23:7]
  wire        _io_stream_out_valid_T_1; // @[Checksum.scala:46:45]
  wire        _io_result_valid_T; // @[Checksum.scala:49:28]
  wire [15:0] _io_result_bits_T; // @[Checksum.scala:50:25]
  wire        io_req_ready_0; // @[Checksum.scala:23:7]
  wire        io_stream_in_ready_0; // @[Checksum.scala:23:7]
  wire        io_stream_out_valid; // @[Checksum.scala:23:7]
  wire        io_result_valid_0; // @[Checksum.scala:23:7]
  wire [15:0] io_result_bits_0; // @[Checksum.scala:23:7]
  reg  [79:0] csum; // @[Checksum.scala:32:17]
  reg  [15:0] startPos; // @[Checksum.scala:35:21]
  wire [16:0] _sumMask_T_1 = {1'h0, startPos}; // @[Checksum.scala:35:21, :36:31, :38:44]
  wire [16:0] _nextStartPos_T = _sumMask_T_1 + 17'h8; // @[Checksum.scala:36:31, :38:44]
  wire [15:0] nextStartPos = _nextStartPos_T[15:0]; // @[Checksum.scala:36:31]
  wire        _sumMask_T = io_stream_in_bits_keep_0[0]; // @[Checksum.scala:23:7, :38:27]
  wire        sumMask_0 = _sumMask_T; // @[Checksum.scala:38:{27,31}]
  wire [15:0] _sumMask_T_2 = _sumMask_T_1[15:0]; // @[Checksum.scala:38:44]
  wire        _sumMask_T_4 = io_stream_in_bits_keep_0[1]; // @[Checksum.scala:23:7, :38:27]
  wire        sumMask_1 = _sumMask_T_4; // @[Checksum.scala:38:{27,31}]
  wire [16:0] _sumMask_T_5 = _sumMask_T_1 + 17'h1; // @[Checksum.scala:38:44]
  wire [15:0] _sumMask_T_6 = _sumMask_T_5[15:0]; // @[Checksum.scala:38:44]
  wire        _sumMask_T_8 = io_stream_in_bits_keep_0[2]; // @[Checksum.scala:23:7, :38:27]
  wire        sumMask_2 = _sumMask_T_8; // @[Checksum.scala:38:{27,31}]
  wire [16:0] _sumMask_T_9 = _sumMask_T_1 + 17'h2; // @[Checksum.scala:38:44]
  wire [15:0] _sumMask_T_10 = _sumMask_T_9[15:0]; // @[Checksum.scala:38:44]
  wire        _sumMask_T_12 = io_stream_in_bits_keep_0[3]; // @[Checksum.scala:23:7, :38:27]
  wire        sumMask_3 = _sumMask_T_12; // @[Checksum.scala:38:{27,31}]
  wire [16:0] _sumMask_T_13 = _sumMask_T_1 + 17'h3; // @[Checksum.scala:38:44]
  wire [15:0] _sumMask_T_14 = _sumMask_T_13[15:0]; // @[Checksum.scala:38:44]
  wire        _sumMask_T_16 = io_stream_in_bits_keep_0[4]; // @[Checksum.scala:23:7, :38:27]
  wire        sumMask_4 = _sumMask_T_16; // @[Checksum.scala:38:{27,31}]
  wire [16:0] _sumMask_T_17 = _sumMask_T_1 + 17'h4; // @[Checksum.scala:38:44]
  wire [15:0] _sumMask_T_18 = _sumMask_T_17[15:0]; // @[Checksum.scala:38:44]
  wire        _sumMask_T_20 = io_stream_in_bits_keep_0[5]; // @[Checksum.scala:23:7, :38:27]
  wire        sumMask_5 = _sumMask_T_20; // @[Checksum.scala:38:{27,31}]
  wire [16:0] _sumMask_T_21 = _sumMask_T_1 + 17'h5; // @[Checksum.scala:38:44]
  wire [15:0] _sumMask_T_22 = _sumMask_T_21[15:0]; // @[Checksum.scala:38:44]
  wire        _sumMask_T_24 = io_stream_in_bits_keep_0[6]; // @[Checksum.scala:23:7, :38:27]
  wire        sumMask_6 = _sumMask_T_24; // @[Checksum.scala:38:{27,31}]
  wire [16:0] _sumMask_T_25 = _sumMask_T_1 + 17'h6; // @[Checksum.scala:38:44]
  wire [15:0] _sumMask_T_26 = _sumMask_T_25[15:0]; // @[Checksum.scala:38:44]
  wire        _sumMask_T_28 = io_stream_in_bits_keep_0[7]; // @[Checksum.scala:23:7, :38:27]
  wire        sumMask_7 = _sumMask_T_28; // @[Checksum.scala:38:{27,31}]
  wire [16:0] _sumMask_T_29 = _sumMask_T_1 + 17'h7; // @[Checksum.scala:38:44]
  wire [15:0] _sumMask_T_30 = _sumMask_T_29[15:0]; // @[Checksum.scala:38:44]
  // Per-byte keep mask expanded to 8 bits per lane.
  wire [7:0]  _sumData_T = {8{sumMask_0}}; // @[Checksum.scala:38:31, :40:57]
  wire [7:0]  _sumData_T_1 = {8{sumMask_1}}; // @[Checksum.scala:38:31, :40:57]
  wire [7:0]  _sumData_T_2 = {8{sumMask_2}}; // @[Checksum.scala:38:31, :40:57]
  wire [7:0]  _sumData_T_3 = {8{sumMask_3}}; // @[Checksum.scala:38:31, :40:57]
  wire [7:0]  _sumData_T_4 = {8{sumMask_4}}; // @[Checksum.scala:38:31, :40:57]
  wire [7:0]  _sumData_T_5 = {8{sumMask_5}}; // @[Checksum.scala:38:31, :40:57]
  wire [7:0]  _sumData_T_6 = {8{sumMask_6}}; // @[Checksum.scala:38:31, :40:57]
  wire [7:0]  _sumData_T_7 = {8{sumMask_7}}; // @[Checksum.scala:38:31, :40:57]
  wire [15:0] sumData_lo_lo = {_sumData_T_1, _sumData_T}; // @[Checksum.scala:40:57]
  wire [15:0] sumData_lo_hi = {_sumData_T_3, _sumData_T_2}; // @[Checksum.scala:40:57]
  wire [31:0] sumData_lo = {sumData_lo_hi, sumData_lo_lo}; // @[Checksum.scala:40:57]
  wire [15:0] sumData_hi_lo = {_sumData_T_5, _sumData_T_4}; // @[Checksum.scala:40:57]
  wire [15:0] sumData_hi_hi = {_sumData_T_7, _sumData_T_6}; // @[Checksum.scala:40:57]
  wire [31:0] sumData_hi = {sumData_hi_hi, sumData_hi_lo}; // @[Checksum.scala:40:57]
  wire [63:0] _sumData_T_8 = {sumData_hi, sumData_lo}; // @[Checksum.scala:40:57]
  wire [63:0] sumData = io_stream_in_bits_data_0 & _sumData_T_8; // @[Checksum.scala:23:7, :40:{40,57}]
  // FSM state: 0 = idle/req, 1 = stream, 2 = fold, 3 = result.
  reg  [1:0]  state; // @[Checksum.scala:43:22]
  assign _io_req_ready_T = state == 2'h0; // @[Checksum.scala:43:22, :45:25]
  assign io_req_ready_0 = _io_req_ready_T; // @[Checksum.scala:23:7, :45:25]
  wire        _GEN = state == 2'h1; // @[Checksum.scala:43:22, :46:32]
  wire        _io_stream_out_valid_T; // @[Checksum.scala:46:32]
  assign _io_stream_out_valid_T = _GEN; // @[Checksum.scala:46:32]
  wire        _io_stream_in_ready_T; // @[Checksum.scala:47:31]
  assign _io_stream_in_ready_T = _GEN; // @[Checksum.scala:46:32, :47:31]
  assign _io_stream_out_valid_T_1 = _io_stream_out_valid_T & io_stream_in_valid_0; // @[Checksum.scala:23:7, :46:{32,45}]
  assign io_stream_out_valid = _io_stream_out_valid_T_1; // @[Checksum.scala:23:7, :46:45]
  assign _io_stream_in_ready_T_1 = _io_stream_in_ready_T; // @[Checksum.scala:47:{31,44}]
  assign io_stream_in_ready_0 = _io_stream_in_ready_T_1; // @[Checksum.scala:23:7, :47:44]
  assign _io_result_valid_T = &state; // @[Checksum.scala:43:22, :49:28]
  assign io_result_valid_0 = _io_result_valid_T; // @[Checksum.scala:23:7, :49:28]
  assign _io_result_bits_T = csum[15:0]; // @[Checksum.scala:32:17, :50:25]
  wire [15:0] lower = csum[15:0]; // @[Checksum.scala:32:17, :50:25, :73:21]
  assign io_result_bits_0 = _io_result_bits_T; // @[Checksum.scala:23:7, :50:25]
  wire [80:0] _csum_T = {1'h0, csum} + {17'h0, sumData}; // @[Checksum.scala:32:17, :36:31, :40:40, :62:20]
  wire [79:0] _csum_T_1 = _csum_T[79:0]; // @[Checksum.scala:62:20]
  wire [63:0] upper = csum[79:16]; // @[Checksum.scala:32:17, :72:21]
  wire [15:0] _csum_T_2 = ~lower; // @[Checksum.scala:73:21, :76:15]
  wire [64:0] _csum_T_3 = {1'h0, upper} + {49'h0, lower}; // @[Checksum.scala:36:31, :72:21, :73:21, :79:21]
  wire [63:0] _csum_T_4 = _csum_T_3[63:0]; // @[Checksum.scala:79:21]
  wire        _T = io_req_ready_0 & io_req_valid_0; // @[Decoupled.scala:51:35]
  wire        _T_1 = io_stream_in_ready_0 & io_stream_in_valid_0; // @[Decoupled.scala:51:35]
  wire        _T_2 = state == 2'h2; // @[Checksum.scala:43:22, :71:15]
  wire        _T_3 = upper == 64'h0; // @[Checksum.scala:72:21, :75:17]
  always @(posedge clock) begin // @[Checksum.scala:23:7]
    if (_T_2) // @[Checksum.scala:71:15]
      csum <= _T_3 ? {64'h0, _csum_T_2} : {16'h0, _csum_T_4}; // @[Checksum.scala:32:17, :75:{17,26}, :76:{12,15}, :79:{12,21}]
    else if (_T_1) // @[Decoupled.scala:51:35]
      csum <= _csum_T_1; // @[Checksum.scala:32:17, :62:20]
    else if (_T) // @[Decoupled.scala:51:35]
      csum <= 80'h0; // @[Checksum.scala:32:17, :55:10]
    if (_T_1) // @[Decoupled.scala:51:35]
      startPos <= nextStartPos; // @[Checksum.scala:35:21, :36:31]
    else if (_T) // @[Decoupled.scala:51:35]
      startPos <= 16'h0; // @[Checksum.scala:35:21]
    if (reset) // @[Checksum.scala:23:7]
      state <= 2'h0; // @[Checksum.scala:43:22]
    else if (io_result_ready_0 & io_result_valid_0) // @[Decoupled.scala:51:35]
      state <= 2'h0; // @[Checksum.scala:43:22]
    else if (_T_2 & _T_3) // @[Checksum.scala:60:28, :71:{15,27}, :75:{17,26}, :77:13]
      state <= 2'h3; // @[Checksum.scala:43:22]
    else if (_T_1 & io_stream_in_bits_last_0) // @[Decoupled.scala:51:35]
      state <= 2'h2; // @[Checksum.scala:43:22]
    else if (_T) // @[Decoupled.scala:51:35]
      state <= 2'h1; // @[Checksum.scala:43:22]
  end // always @(posedge)
  assign io_req_ready = io_req_ready_0; // @[Checksum.scala:23:7]
  assign io_stream_in_ready = io_stream_in_ready_0; // @[Checksum.scala:23:7]
  assign io_result_valid = io_result_valid_0; // @[Checksum.scala:23:7]
  assign io_result_bits = io_result_bits_0; // @[Checksum.scala:23:7]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
  /** Chain `n` registers behind `in`, each reset to `init`. When `name` is
    * given, the registers are suggested names `<name>_<i>`; stage 0 is the
    * final (output) register of the chain.
    */
  def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
    (0 until n).foldRight(in) {
      case (i, next) => {
        val r = RegNext(next, init)
        name.foreach { na => r.suggestName(s"${na}_${i}") }
        r
      }
    }
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg,
* but only used for timing applications
*/
/** Common w-bit d -> q interface shared by the pipeline/synchronizer
  * register modules, so backend flows can swap implementations.
  */
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(w.W))
    val q = Output(UInt(w.W))
  })
}
object AbstractPipelineReg {
  /** Instantiate `gen`, drive its d input with `in` converted to bits, and
    * return its q output reinterpreted as `in`'s type. Optionally suggests
    * `name` for the instance.
    */
  def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
    val chain = Module(gen)
    name.foreach{ chain.suggestName(_) }
    chain.io.d := in.asUInt
    chain.io.q.asTypeOf(in)
  }
}
/** A `depth`-deep chain of w-wide asynchronously-reset register vectors
  * (see the file header). Data enters at `chain.last` and exits at
  * `chain.head`; every stage is permanently enabled.
  */
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
  require(depth > 0, "Depth must be greater than 0.")
  override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
  // One async-reset register vector per stage, named `<name>_<i>`.
  val chain = List.tabulate(depth) { i =>
    Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
  }
  chain.last.io.d := io.d
  chain.last.io.en := true.B
  // Wire each stage's output into the previous stage's input.
  (chain.init zip chain.tail).foreach { case (sink, source) =>
    sink.io.d := source.io.q
    sink.io.en := true.B
  }
  io.q := chain.head.io.q
}
object AsyncResetShiftReg {
  /** Pipe `in` through a `depth`-deep asynchronously-reset shift register;
    * the overloads below cover the init-as-literal and default-init cases.
    */
  def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
    AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
  def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
    apply(in, depth, 0, name)
  def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
    apply(in, depth, init.litValue.toInt, name)
  def apply [T <: Data](in: T, depth: Int, init: T): T =
    apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
/** Configuration for an AsyncQueue clock-domain crossing.
  *
  *  depth - number of entries (must be a power of two)
  *  sync  - synchronizer chain depth (>= 2)
  */
case class AsyncQueueParams(
  depth: Int = 8,
  sync: Int = 3,
  safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
  narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
  require (depth > 0 && isPow2(depth))
  require (sync >= 2)
  val bits = log2Ceil(depth)              // width of a queue index (excluding the wrap bit)
  val wires = if (narrow) 1 else depth    // number of data-word wires crossing the boundary
}
object AsyncQueueParams {
  /** Configuration for a single-entry queue. With only one entry the
    * narrow read mux buys nothing, so it is always disabled.
    */
  def singleton(sync: Int = 3, safe: Boolean = true) =
    AsyncQueueParams(depth = 1, sync = sync, safe = safe, narrow = false)
}
/** Reset-safety handshake wires between the two halves of a safe
  * AsyncQueue; directions are from the source's perspective.
  */
class AsyncBundleSafety extends Bundle {
  val ridx_valid = Input (Bool())
  val widx_valid = Output(Bool())
  val source_reset_n = Output(Bool())
  val sink_reset_n = Input (Bool())
}
/** The wires of an AsyncQueue that cross the clock-domain boundary, seen
  * from the source side.
  */
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
  // Data-path synchronization
  val mem = Output(Vec(params.wires, gen))
  val ridx = Input (UInt((params.bits+1).W))
  val widx = Output(UInt((params.bits+1).W))
  // Read index driven by the sink; present only in narrow mode, where the
  // read mux lives on the source side.
  val index = params.narrow.option(Input(UInt(params.bits.W)))

  // Signals used to self-stabilize a safe AsyncQueue
  val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
  /** Gray-coded counter. Keeps a binary register (suggested name `name`),
    * advanced by `increment` and synchronously cleared by `clear`, and
    * returns the gray encoding (bin ^ (bin >> 1)) of the *next* value.
    */
  def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
    val incremented = Wire(UInt(bits.W))
    val binary = RegNext(next=incremented, init=0.U).suggestName(name)
    incremented := Mux(clear, 0.U, binary + increment.asUInt)
    incremented ^ (incremented >> 1)
  }
}
/** Level-signal synchronizer used by the AsyncQueue reset-safety logic.
  * Clock and reset are explicit ports so each instance can be bound to
  * either domain; io.in passes through a `sync`-deep async-reset shift
  * register named after `desc`.
  */
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
  val io = IO(new Bundle {
    val in = Input(Bool())
    val out = Output(Bool())
  })
  val clock = IO(Input(Clock()))
  val reset = IO(Input(AsyncReset()))

  withClockAndReset(clock, reset){
    io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
  }
}
/** Source (enqueue) half of an asynchronous FIFO. Owns the data memory and
  * the gray-coded write pointer; the read pointer arrives from the sink
  * through a synchronizer. With params.safe, an additional valid/reset
  * handshake lets either side be reset while the queue is empty.
  */
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
  override def desiredName = s"AsyncQueueSource_${gen.typeName}"

  val io = IO(new Bundle {
    // These come from the source domain
    val enq = Flipped(Decoupled(gen))
    // These cross to the sink clock domain
    val async = new AsyncBundle(gen, params)
  })

  val bits = params.bits
  val sink_ready = WireInit(true.B)
  val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
  // Gray-coded write pointer; held clear while the sink side is in reset.
  val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
  val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
  // Not-full test: in gray code, full is widx equal to ridx with the top
  // two bits flipped (the (depth | depth >> 1) constant).
  val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)

  // Memory slot addressed from the registered (gray) write pointer.
  val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
  when (io.enq.fire) { mem(index) := io.enq.bits }

  // Register ready/widx (async reset) before they feed the crossing.
  val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
  io.enq.ready := ready_reg && sink_ready

  val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
  io.async.widx := widx_reg

  io.async.index match {
    case Some(index) => io.async.mem(0) := mem(index) // narrow: mux on source side
    case None => io.async.mem := mem
  }

  io.async.safe.foreach { sio =>
    // Reset-safety handshake: synchronize each side's liveness indication
    // into the other domain so the pointers can be resynchronized on reset.
    val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
    val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
    val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
    val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
    source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
    source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
    sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
    sink_valid .reset := reset.asAsyncReset
    source_valid_0.clock := clock
    source_valid_1.clock := clock
    sink_extend .clock := clock
    sink_valid .clock := clock

    source_valid_0.io.in := true.B
    source_valid_1.io.in := source_valid_0.io.out
    sio.widx_valid := source_valid_1.io.out
    sink_extend.io.in := sio.ridx_valid
    sink_valid.io.in := sink_extend.io.out
    sink_ready := sink_valid.io.out
    sio.source_reset_n := !reset.asBool

    // Assert that if there is stuff in the queue, then reset cannot happen
    //  Impossible to write because dequeue can occur on the receiving side,
    //  then reset allowed to happen, but write side cannot know that dequeue
    //  occurred.
    // TODO: write some sort of sanity check assertion for users
    // that denote don't reset when there is activity
    //    assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
    //    assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
  }
}
/** Sink (dequeue) half of an asynchronous FIFO. Owns the gray-coded read
  * pointer; the data memory and write pointer arrive from the source
  * domain. Mirrors AsyncQueueSource's reset-safety handshake.
  */
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
  override def desiredName = s"AsyncQueueSink_${gen.typeName}"

  val io = IO(new Bundle {
    // These come from the sink domain
    val deq = Decoupled(gen)
    // These cross to the source clock domain
    val async = Flipped(new AsyncBundle(gen, params))
  })

  val bits = params.bits
  val source_ready = WireInit(true.B)
  // Gray-coded read pointer; held clear while the source side is in reset.
  val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
  val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
  // Non-empty test: gray pointers are equal exactly when the queue is empty.
  val valid = source_ready && ridx =/= widx

  // The mux is safe because timing analysis ensures ridx has reached the register
  // On an ASIC, changes to the unread location cannot affect the selected value
  // On an FPGA, only one input changes at a time => mem updates don't cause glitches
  // The register only latches when the selected valued is not being written
  val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
  io.async.index.foreach { _ := index }
  // This register does not NEED to be reset, as its contents will not
  // be considered unless the asynchronously reset deq valid register is set.
  // It is possible that bits latches when the source domain is reset / has power cut
  // This is safe, because isolation gates brought mem low before the zeroed widx reached us
  val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
  io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))

  val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
  io.deq.valid := valid_reg && source_ready

  val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
  io.async.ridx := ridx_reg

  io.async.safe.foreach { sio =>
    // Mirror image of the source half's reset-safety handshake.
    val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
    val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
    val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
    val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
    sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
    sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
    source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
    source_valid .reset := reset.asAsyncReset
    sink_valid_0 .clock := clock
    sink_valid_1 .clock := clock
    source_extend.clock := clock
    source_valid .clock := clock

    sink_valid_0.io.in := true.B
    sink_valid_1.io.in := sink_valid_0.io.out
    sio.ridx_valid := sink_valid_1.io.out
    source_extend.io.in := sio.widx_valid
    source_valid.io.in := source_extend.io.out
    source_ready := source_valid.io.out
    sio.sink_reset_n := !reset.asBool

    // TODO: write some sort of sanity check assertion for users
    // that denote don't reset when there is activity
    //
    // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
    // val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
    // val reset_rise = !reset_and_extend_prev && reset_and_extend
    // val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
    // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
  }
}
/** Terminates an AsyncBundle in the current clock domain, yielding a
  * DecoupledIO dequeue port.
  */
object FromAsyncBundle
{
  // Sometimes it makes sense for the sink to have different sync than the source
  def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
  def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
    val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
    sink.io.async <> x
    sink.io.deq
  }
}
/** Wraps a ReadyValid stream into an [[AsyncBundle]] by instantiating an
  * [[AsyncQueueSource]] and exposing its asynchronous side.
  */
object ToAsyncBundle
{
  def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
    val src = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
    src.io.enq <> x
    src.io.async
  }
}
/** A complete asynchronous FIFO crossing: an [[AsyncQueueSource]] in the
  * enqueue clock domain wired to an [[AsyncQueueSink]] in the dequeue domain.
  */
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
  val io = IO(new CrossingIO(gen))
  // Each half is elaborated entirely under its own clock and reset.
  val source = withClockAndReset(io.enq_clock, io.enq_reset) {
    Module(new AsyncQueueSource(gen, params))
  }
  val sink = withClockAndReset(io.deq_clock, io.deq_reset) {
    Module(new AsyncQueueSink(gen, params))
  }
  // Join the two halves across the crossing and expose the synchronous ends.
  sink.io.async <> source.io.async
  source.io.enq <> io.enq
  io.deq <> sink.io.deq
}
| module AsyncQueueSource_Phit_13( // @[AsyncQueue.scala:70:7]
input clock, // @[AsyncQueue.scala:70:7]
input reset, // @[AsyncQueue.scala:70:7]
output io_enq_ready, // @[AsyncQueue.scala:73:14]
input io_enq_valid, // @[AsyncQueue.scala:73:14]
input [31:0] io_enq_bits_phit, // @[AsyncQueue.scala:73:14]
output [31:0] io_async_mem_0_phit, // @[AsyncQueue.scala:73:14]
output [31:0] io_async_mem_1_phit, // @[AsyncQueue.scala:73:14]
output [31:0] io_async_mem_2_phit, // @[AsyncQueue.scala:73:14]
output [31:0] io_async_mem_3_phit, // @[AsyncQueue.scala:73:14]
output [31:0] io_async_mem_4_phit, // @[AsyncQueue.scala:73:14]
output [31:0] io_async_mem_5_phit, // @[AsyncQueue.scala:73:14]
output [31:0] io_async_mem_6_phit, // @[AsyncQueue.scala:73:14]
output [31:0] io_async_mem_7_phit, // @[AsyncQueue.scala:73:14]
input [3:0] io_async_ridx, // @[AsyncQueue.scala:73:14]
output [3:0] io_async_widx, // @[AsyncQueue.scala:73:14]
input io_async_safe_ridx_valid, // @[AsyncQueue.scala:73:14]
output io_async_safe_widx_valid, // @[AsyncQueue.scala:73:14]
output io_async_safe_source_reset_n, // @[AsyncQueue.scala:73:14]
input io_async_safe_sink_reset_n // @[AsyncQueue.scala:73:14]
);
wire _sink_extend_io_out; // @[AsyncQueue.scala:105:30]
wire _source_valid_0_io_out; // @[AsyncQueue.scala:102:32]
wire io_enq_valid_0 = io_enq_valid; // @[AsyncQueue.scala:70:7]
wire [31:0] io_enq_bits_phit_0 = io_enq_bits_phit; // @[AsyncQueue.scala:70:7]
wire [3:0] io_async_ridx_0 = io_async_ridx; // @[AsyncQueue.scala:70:7]
wire io_async_safe_ridx_valid_0 = io_async_safe_ridx_valid; // @[AsyncQueue.scala:70:7]
wire io_async_safe_sink_reset_n_0 = io_async_safe_sink_reset_n; // @[AsyncQueue.scala:70:7]
wire _widx_T = reset; // @[AsyncQueue.scala:83:30]
wire _ready_reg_T = reset; // @[AsyncQueue.scala:90:35]
wire _widx_reg_T = reset; // @[AsyncQueue.scala:93:34]
wire _source_valid_0_reset_T = reset; // @[AsyncQueue.scala:107:36]
wire _source_valid_1_reset_T = reset; // @[AsyncQueue.scala:108:36]
wire _sink_extend_reset_T = reset; // @[AsyncQueue.scala:109:36]
wire _sink_valid_reset_T = reset; // @[AsyncQueue.scala:110:35]
wire _io_async_safe_source_reset_n_T = reset; // @[AsyncQueue.scala:123:34]
wire _io_enq_ready_T; // @[AsyncQueue.scala:91:29]
wire _io_async_safe_source_reset_n_T_1; // @[AsyncQueue.scala:123:27]
wire io_enq_ready_0; // @[AsyncQueue.scala:70:7]
wire [31:0] io_async_mem_0_phit_0; // @[AsyncQueue.scala:70:7]
wire [31:0] io_async_mem_1_phit_0; // @[AsyncQueue.scala:70:7]
wire [31:0] io_async_mem_2_phit_0; // @[AsyncQueue.scala:70:7]
wire [31:0] io_async_mem_3_phit_0; // @[AsyncQueue.scala:70:7]
wire [31:0] io_async_mem_4_phit_0; // @[AsyncQueue.scala:70:7]
wire [31:0] io_async_mem_5_phit_0; // @[AsyncQueue.scala:70:7]
wire [31:0] io_async_mem_6_phit_0; // @[AsyncQueue.scala:70:7]
wire [31:0] io_async_mem_7_phit_0; // @[AsyncQueue.scala:70:7]
wire io_async_safe_widx_valid_0; // @[AsyncQueue.scala:70:7]
wire io_async_safe_source_reset_n_0; // @[AsyncQueue.scala:70:7]
wire [3:0] io_async_widx_0; // @[AsyncQueue.scala:70:7]
wire sink_ready; // @[AsyncQueue.scala:81:28]
reg [31:0] mem_0_phit; // @[AsyncQueue.scala:82:16]
assign io_async_mem_0_phit_0 = mem_0_phit; // @[AsyncQueue.scala:70:7, :82:16]
reg [31:0] mem_1_phit; // @[AsyncQueue.scala:82:16]
assign io_async_mem_1_phit_0 = mem_1_phit; // @[AsyncQueue.scala:70:7, :82:16]
reg [31:0] mem_2_phit; // @[AsyncQueue.scala:82:16]
assign io_async_mem_2_phit_0 = mem_2_phit; // @[AsyncQueue.scala:70:7, :82:16]
reg [31:0] mem_3_phit; // @[AsyncQueue.scala:82:16]
assign io_async_mem_3_phit_0 = mem_3_phit; // @[AsyncQueue.scala:70:7, :82:16]
reg [31:0] mem_4_phit; // @[AsyncQueue.scala:82:16]
assign io_async_mem_4_phit_0 = mem_4_phit; // @[AsyncQueue.scala:70:7, :82:16]
reg [31:0] mem_5_phit; // @[AsyncQueue.scala:82:16]
assign io_async_mem_5_phit_0 = mem_5_phit; // @[AsyncQueue.scala:70:7, :82:16]
reg [31:0] mem_6_phit; // @[AsyncQueue.scala:82:16]
assign io_async_mem_6_phit_0 = mem_6_phit; // @[AsyncQueue.scala:70:7, :82:16]
reg [31:0] mem_7_phit; // @[AsyncQueue.scala:82:16]
assign io_async_mem_7_phit_0 = mem_7_phit; // @[AsyncQueue.scala:70:7, :82:16]
wire _widx_T_1 = io_enq_ready_0 & io_enq_valid_0; // @[Decoupled.scala:51:35]
wire _widx_T_2 = ~sink_ready; // @[AsyncQueue.scala:81:28, :83:77]
wire [3:0] _widx_incremented_T_2; // @[AsyncQueue.scala:53:23]
wire [3:0] widx_incremented; // @[AsyncQueue.scala:51:27]
reg [3:0] widx_widx_bin; // @[AsyncQueue.scala:52:25]
wire [4:0] _widx_incremented_T = {1'h0, widx_widx_bin} + {4'h0, _widx_T_1}; // @[Decoupled.scala:51:35]
wire [3:0] _widx_incremented_T_1 = _widx_incremented_T[3:0]; // @[AsyncQueue.scala:53:43]
assign _widx_incremented_T_2 = _widx_T_2 ? 4'h0 : _widx_incremented_T_1; // @[AsyncQueue.scala:52:25, :53:{23,43}, :83:77]
assign widx_incremented = _widx_incremented_T_2; // @[AsyncQueue.scala:51:27, :53:23]
wire [2:0] _widx_T_3 = widx_incremented[3:1]; // @[AsyncQueue.scala:51:27, :54:32]
wire [3:0] widx = {widx_incremented[3], widx_incremented[2:0] ^ _widx_T_3}; // @[AsyncQueue.scala:51:27, :54:{17,32}]
wire [3:0] ridx; // @[ShiftReg.scala:48:24]
wire [3:0] _ready_T = ridx ^ 4'hC; // @[ShiftReg.scala:48:24]
wire _ready_T_1 = widx != _ready_T; // @[AsyncQueue.scala:54:17, :85:{34,44}]
wire ready = sink_ready & _ready_T_1; // @[AsyncQueue.scala:81:28, :85:{26,34}]
wire [2:0] _index_T = io_async_widx_0[2:0]; // @[AsyncQueue.scala:70:7, :87:52]
wire _index_T_1 = io_async_widx_0[3]; // @[AsyncQueue.scala:70:7, :87:80]
wire [2:0] _index_T_2 = {_index_T_1, 2'h0}; // @[AsyncQueue.scala:87:{80,93}]
wire [2:0] index = _index_T ^ _index_T_2; // @[AsyncQueue.scala:87:{52,64,93}]
reg ready_reg; // @[AsyncQueue.scala:90:56]
assign _io_enq_ready_T = ready_reg & sink_ready; // @[AsyncQueue.scala:81:28, :90:56, :91:29]
assign io_enq_ready_0 = _io_enq_ready_T; // @[AsyncQueue.scala:70:7, :91:29]
reg [3:0] widx_gray; // @[AsyncQueue.scala:93:55]
assign io_async_widx_0 = widx_gray; // @[AsyncQueue.scala:70:7, :93:55]
wire _source_valid_0_reset_T_1 = ~io_async_safe_sink_reset_n_0; // @[AsyncQueue.scala:70:7, :107:46]
wire _source_valid_0_reset_T_2 = _source_valid_0_reset_T | _source_valid_0_reset_T_1; // @[AsyncQueue.scala:107:{36,43,46}]
wire _source_valid_0_reset_T_3 = _source_valid_0_reset_T_2; // @[AsyncQueue.scala:107:{43,65}]
wire _source_valid_1_reset_T_1 = ~io_async_safe_sink_reset_n_0; // @[AsyncQueue.scala:70:7, :107:46, :108:46]
wire _source_valid_1_reset_T_2 = _source_valid_1_reset_T | _source_valid_1_reset_T_1; // @[AsyncQueue.scala:108:{36,43,46}]
wire _source_valid_1_reset_T_3 = _source_valid_1_reset_T_2; // @[AsyncQueue.scala:108:{43,65}]
wire _sink_extend_reset_T_1 = ~io_async_safe_sink_reset_n_0; // @[AsyncQueue.scala:70:7, :107:46, :109:46]
wire _sink_extend_reset_T_2 = _sink_extend_reset_T | _sink_extend_reset_T_1; // @[AsyncQueue.scala:109:{36,43,46}]
wire _sink_extend_reset_T_3 = _sink_extend_reset_T_2; // @[AsyncQueue.scala:109:{43,65}]
assign _io_async_safe_source_reset_n_T_1 = ~_io_async_safe_source_reset_n_T; // @[AsyncQueue.scala:123:{27,34}]
assign io_async_safe_source_reset_n_0 = _io_async_safe_source_reset_n_T_1; // @[AsyncQueue.scala:70:7, :123:27]
always @(posedge clock) begin // @[AsyncQueue.scala:70:7]
if (_widx_T_1 & index == 3'h0) // @[Decoupled.scala:51:35]
mem_0_phit <= io_enq_bits_phit_0; // @[AsyncQueue.scala:70:7, :82:16]
if (_widx_T_1 & index == 3'h1) // @[Decoupled.scala:51:35]
mem_1_phit <= io_enq_bits_phit_0; // @[AsyncQueue.scala:70:7, :82:16]
if (_widx_T_1 & index == 3'h2) // @[Decoupled.scala:51:35]
mem_2_phit <= io_enq_bits_phit_0; // @[AsyncQueue.scala:70:7, :82:16]
if (_widx_T_1 & index == 3'h3) // @[Decoupled.scala:51:35]
mem_3_phit <= io_enq_bits_phit_0; // @[AsyncQueue.scala:70:7, :82:16]
if (_widx_T_1 & index == 3'h4) // @[Decoupled.scala:51:35]
mem_4_phit <= io_enq_bits_phit_0; // @[AsyncQueue.scala:70:7, :82:16]
if (_widx_T_1 & index == 3'h5) // @[Decoupled.scala:51:35]
mem_5_phit <= io_enq_bits_phit_0; // @[AsyncQueue.scala:70:7, :82:16]
if (_widx_T_1 & index == 3'h6) // @[Decoupled.scala:51:35]
mem_6_phit <= io_enq_bits_phit_0; // @[AsyncQueue.scala:70:7, :82:16]
if (_widx_T_1 & (&index)) // @[Decoupled.scala:51:35]
mem_7_phit <= io_enq_bits_phit_0; // @[AsyncQueue.scala:70:7, :82:16]
always @(posedge)
always @(posedge clock or posedge _widx_T) begin // @[AsyncQueue.scala:70:7, :83:30]
if (_widx_T) // @[AsyncQueue.scala:70:7, :83:30]
widx_widx_bin <= 4'h0; // @[AsyncQueue.scala:52:25]
else // @[AsyncQueue.scala:70:7]
widx_widx_bin <= widx_incremented; // @[AsyncQueue.scala:51:27, :52:25]
always @(posedge, posedge)
always @(posedge clock or posedge _ready_reg_T) begin // @[AsyncQueue.scala:70:7, :90:35]
if (_ready_reg_T) // @[AsyncQueue.scala:70:7, :90:35]
ready_reg <= 1'h0; // @[AsyncQueue.scala:90:56]
else // @[AsyncQueue.scala:70:7]
ready_reg <= ready; // @[AsyncQueue.scala:85:26, :90:56]
always @(posedge, posedge)
always @(posedge clock or posedge _widx_reg_T) begin // @[AsyncQueue.scala:70:7, :93:34]
if (_widx_reg_T) // @[AsyncQueue.scala:70:7, :93:34]
widx_gray <= 4'h0; // @[AsyncQueue.scala:52:25, :93:55]
else // @[AsyncQueue.scala:70:7]
widx_gray <= widx; // @[AsyncQueue.scala:54:17, :93:55]
always @(posedge, posedge) |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
/** Arguments carried to a TileLink monitor: the edge whose traffic is checked. */
case class TLMonitorArgs(edge: TLEdge)
/** Base class for TileLink protocol monitors.
  *
  * Snoops a full TLBundle as an input and immediately applies `legalize`
  * to it; concrete subclasses supply the actual legality checks.
  */
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
  val io = IO(new Bundle {
    val in = Input(new TLBundle(args.edge.bundle))
  })
  // Implemented by subclasses with the per-channel legality checks.
  def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
  legalize(io.in, args.edge, reset)
}
/** Helper for optionally interposing a monitor in front of a TileLink node. */
object TLMonitor {
  def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode =
    if (!enable) node
    else EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
}
/** Checks TileLink traffic on an edge for protocol legality.
  *
  * `monitorDir` selects whether the checks are emitted as plain assertions
  * (Monitor) or as directional formal properties.
  */
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
  // In formal testplan runs, strict mode must be explicitly enabled.
  require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
  val cover_prop_class = PropertyClass.Default
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
  /** Checks that a beat on the 'A' channel is well-formed for this edge:
    * legal opcode and source, address alignment, per-opcode param/mask/corrupt
    * rules, and that the diplomatic parameters permit the message at all.
    */
  def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
    //switch this flag to turn on diplomacy in error messages
    def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
    monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
    // Reuse these subexpressions to save some firrtl lines
    val source_ok = edge.client.contains(bundle.source)
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val mask = edge.full_mask(bundle)
    monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
    //TODO: check for acquireT?
    when (bundle.opcode === TLMessages.AcquireBlock) {
      monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
      monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
      monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
      monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.AcquirePerm) {
      monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
      monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
      monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
      monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
      monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.Get) {
      monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
      monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.PutFullData) {
      monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.PutPartialData) {
      monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
      // PutPartial may mask off lanes, but only within the legal full mask.
      monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.ArithmeticData) {
      monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
      monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.LogicalData) {
      monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
      monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.Hint) {
      monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
      monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
    }
  }
  /** Checks that a beat on the 'B' channel is well-formed for this edge.
    * Probe checks use `assume` — they constrain manager-side behavior when
    * the monitor is used formally (see `assume` above, which flips direction).
    */
  def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
    monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
    monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
    // Reuse these subexpressions to save some firrtl lines
    val address_ok = edge.manager.containsSafe(edge.address(bundle))
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val mask = edge.full_mask(bundle)
    // B-channel sources must be the first source id of the owning client.
    val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
    when (bundle.opcode === TLMessages.Probe) {
      assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
      assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
      assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
      assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
      assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
      assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
      assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.Get) {
      monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
      monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.PutFullData) {
      monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
      monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.PutPartialData) {
      monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
      monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
      monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.ArithmeticData) {
      monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
      monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
      monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.LogicalData) {
      monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
      monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
      monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.Hint) {
      monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
      monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
      monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
    }
  }
  /** Checks that a beat on the 'C' channel is well-formed for this edge:
    * legal opcode, source, address, alignment, and per-opcode
    * param/corrupt rules for ProbeAck/Release/AccessAck/HintAck families.
    */
  def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
    monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
    val source_ok = edge.client.contains(bundle.source)
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val address_ok = edge.manager.containsSafe(edge.address(bundle))
    monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
    when (bundle.opcode === TLMessages.ProbeAck) {
      monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
      monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.ProbeAckData) {
      monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
    }
    when (bundle.opcode === TLMessages.Release) {
      monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
      monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
      monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.ReleaseData) {
      monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
      monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
    }
    when (bundle.opcode === TLMessages.AccessAck) {
      monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
      monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.AccessAckData) {
      monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
    }
    when (bundle.opcode === TLMessages.HintAck) {
      monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
      monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
    }
  }
  /** Checks that a beat on the 'D' channel is well-formed for this edge.
    * All checks use `assume` — the D channel flows from the manager side,
    * so formally these become assumptions on manager behavior.
    */
  def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
    assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
    val source_ok = edge.client.contains(bundle.source)
    val sink_ok = bundle.sink < edge.manager.endSinkId.U
    // Whether this edge's managers are allowed to deny puts/gets at all.
    val deny_put_ok = edge.manager.mayDenyPut.B
    val deny_get_ok = edge.manager.mayDenyGet.B
    when (bundle.opcode === TLMessages.ReleaseAck) {
      assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
      assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
      assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
      assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
    }
    when (bundle.opcode === TLMessages.Grant) {
      assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
      assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
      assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
      assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
      assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
      assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
      assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
    }
    when (bundle.opcode === TLMessages.GrantData) {
      assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
      assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
      assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
      assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
      assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
      assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
      assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
    }
    when (bundle.opcode === TLMessages.AccessAck) {
      assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
      // size is ignored
      assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
      assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
      assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
    }
    when (bundle.opcode === TLMessages.AccessAckData) {
      assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
      // size is ignored
      assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
      assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
      assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
    }
    when (bundle.opcode === TLMessages.HintAck) {
      assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
      // size is ignored
      assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
      assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
      assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
    }
  }
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
  /** Dispatch per-channel format checks, evaluated only while a channel is
    * presenting a valid beat.  On non-TL-C links the B/C/E channels must be idle.
    */
  def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
    when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
    when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
    if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
      // TL-C link: the coherence channels exist and must themselves be legal.
      when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
      when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
      when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
    } else {
      // TL-UL/TL-UH link: no coherence traffic may ever appear.
      monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
      monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
      monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
    }
  }
  /** Check that the control fields of an 'A' burst are held constant across
    * beats: latch them on the first accepted beat, then require every later
    * valid beat of the same burst to repeat them unchanged.
    */
  def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
    val a_first = edge.first(a.bits, a.fire)
    val opcode = Reg(UInt())
    val param = Reg(UInt())
    val size = Reg(UInt())
    val source = Reg(UInt())
    val address = Reg(UInt())
    when (a.valid && !a_first) {
      monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
      monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
      monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
      monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
    }
    // Capture the burst's control fields when its first beat is accepted.
    when (a.fire && a_first) {
      opcode := a.bits.opcode
      param := a.bits.param
      size := a.bits.size
      source := a.bits.source
      address := a.bits.address
    }
  }
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
  /** Formal-verification version of the A=>D source-ID tracker.  Instead of
    * tracking every source ID, it tracks a single symbolic source ID
    * (constrained stable and in range via Property), which the formal tool
    * universally quantifies over.
    */
  def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
    // Symbolic variable
    val sym_source = Wire(UInt(edge.client.endSourceId.W))
    // TODO: Connect sym_source to a fixed value for simulation and to a
    // free wire in formal
    sym_source := 0.U
    // Type casting Int to UInt
    val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
    maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
    val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
    sym_source_d := sym_source
    // These will be constraints for FV setup
    Property(
      MonitorDirection.Monitor,
      (sym_source === sym_source_d),
      "sym_source should remain stable",
      PropertyClass.Default)
    Property(
      MonitorDirection.Monitor,
      (sym_source <= maxSourceId),
      "sym_source should take legal value",
      PropertyClass.Default)
    // One-bit in-flight tracker plus the opcode/size of the outstanding
    // request, all for the symbolic source only.
    val my_resp_pend = RegInit(false.B)
    val my_opcode = Reg(UInt())
    val my_size = Reg(UInt())
    val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
    val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
    val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
    val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
    val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
    val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
    when (my_set_resp_pend) {
      my_resp_pend := true.B
    } .elsewhen (my_clr_resp_pend) {
      my_resp_pend := false.B
    }
    when (my_a_first_beat) {
      my_opcode := bundle.a.bits.opcode
      my_size := bundle.a.bits.size
    }
    // Bypass the registers when request and response occur in the same cycle.
    val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
    val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
    // Legal D opcode implied by the recorded A opcode.
    val my_resp_opcode_legal = Wire(Bool())
    when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
      (my_resp_opcode === TLMessages.LogicalData)) {
      my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
    } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
      my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
    } .otherwise {
      my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
    }
    monAssert (IfThen(my_resp_pend, !my_a_first_beat),
      "Request message should not be sent with a source ID, for which a response message" +
      "is already pending (not received until current cycle) for a prior request message" +
      "with the same source ID" + extra)
    assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
      "Response message should be accepted with a source ID only if a request message with the" +
      "same source ID has been accepted or is being accepted in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
      "Response message should be sent with a source ID only if a request message with the" +
      "same source ID has been accepted or is being sent in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
      "If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
      "message" + extra)
    assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
      "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
      "request message" + extra)
  }
  /** Check that the control fields of a 'C' burst are held constant across
    * beats: latch them on the first accepted beat, then require every later
    * valid beat of the same burst to repeat them unchanged.
    */
  def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
    val c_first = edge.first(c.bits, c.fire)
    val opcode = Reg(UInt())
    val param = Reg(UInt())
    val size = Reg(UInt())
    val source = Reg(UInt())
    val address = Reg(UInt())
    when (c.valid && !c_first) {
      monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
      monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
      monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
      monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra)
    }
    // Capture the burst's control fields when its first beat is accepted.
    when (c.fire && c_first) {
      opcode := c.bits.opcode
      param := c.bits.param
      size := c.bits.size
      source := c.bits.source
      address := c.bits.address
    }
  }
  /** Check that the control fields of a 'D' burst are held constant across
    * beats.  Uses `assume` (not monAssert) because D is driven by the manager
    * side, which is an environment assumption from this monitor's viewpoint.
    */
  def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
    val d_first = edge.first(d.bits, d.fire)
    val opcode = Reg(UInt())
    val param = Reg(UInt())
    val size = Reg(UInt())
    val source = Reg(UInt())
    val sink = Reg(UInt())
    val denied = Reg(Bool())
    when (d.valid && !d_first) {
      assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
      assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
      assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
      assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra)
    }
    // Capture the burst's control fields when its first beat is accepted.
    when (d.fire && d_first) {
      opcode := d.bits.opcode
      param := d.bits.param
      size := d.bits.size
      source := d.bits.source
      sink := d.bits.sink
      denied := d.bits.denied
    }
  }
  /** Run burst-invariance checks on all channels; B and C only exist on TL-C
    * links (E bursts are single-beat, so E needs no check).
    */
  def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
    legalizeMultibeatA(bundle.a, edge)
    legalizeMultibeatD(bundle.d, edge)
    if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
      legalizeMultibeatB(bundle.b, edge)
      legalizeMultibeatC(bundle.c, edge)
    }
  }
  //This is left in for almond which doesn't adhere to the tilelink protocol
  /** Legacy A=>D source-ID tracker: one in-flight bit per source ID, checking
    * only re-use of an in-flight source (A) and responses to nothing (D),
    * plus a watchdog that fires when traffic stalls entirely.
    */
  @deprecated("Use legalizeADSource instead if possible","")
  def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
    val inflight = RegInit(0.U(edge.client.endSourceId.W))
    val a_first = edge.first(bundle.a.bits, bundle.a.fire)
    val d_first = edge.first(bundle.d.bits, bundle.d.fire)
    val a_set = WireInit(0.U(edge.client.endSourceId.W))
    when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
      a_set := UIntToOH(bundle.a.bits.source)
      assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
    }
    val d_clr = WireInit(0.U(edge.client.endSourceId.W))
    // ReleaseAck responds to C, not A, so it must not clear A-side state.
    val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
    when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
      d_clr := UIntToOH(bundle.d.bits.source)
      assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
    }
    if (edge.manager.minLatency > 0) {
      assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
    }
    inflight := (inflight | a_set) & ~d_clr
    // Watchdog: resets on any A/D activity; fires after `limit` idle cycles
    // while transactions are still outstanding (limit 0 disables the check).
    val watchdog = RegInit(0.U(32.W))
    val limit = PlusArg("tilelink_timeout",
      docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
    assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
    watchdog := watchdog + 1.U
    when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
  }
  /** Strict A=>D source-ID tracker.  For each source ID it records whether a
    * transaction is in flight plus the opcode and size of the original A
    * request (packed into wide registers, one slot per source ID, each slot
    * tagged with a low "set" bit so value 0 means "unset").  Every D response
    * must match an in-flight request's opcode mapping and exact size.
    */
  def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
    val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
    val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
    val log_a_size_bus_size = log2Ceil(a_size_bus_size)
    def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
    val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
    inflight.suggestName("inflight")
    val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
    inflight_opcodes.suggestName("inflight_opcodes")
    val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
    inflight_sizes.suggestName("inflight_sizes")
    val a_first = edge.first(bundle.a.bits, bundle.a.fire)
    a_first.suggestName("a_first")
    val d_first = edge.first(bundle.d.bits, bundle.d.fire)
    d_first.suggestName("d_first")
    // One-hot set masks computed from the current A beat; the *_wo_ready
    // variants trigger on valid alone (used for the minLatency check).
    val a_set = WireInit(0.U(edge.client.endSourceId.W))
    val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
    a_set.suggestName("a_set")
    a_set_wo_ready.suggestName("a_set_wo_ready")
    val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
    a_opcodes_set.suggestName("a_opcodes_set")
    val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
    a_sizes_set.suggestName("a_sizes_set")
    // Recover the recorded opcode/size for the responding source; the slot's
    // low bit is the "set" flag, hence the final >> 1.
    val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
    a_opcode_lookup.suggestName("a_opcode_lookup")
    a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
    val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
    a_size_lookup.suggestName("a_size_lookup")
    a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
    // Legal D opcode(s) for each A opcode, indexed by the A opcode value.
    val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
    val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
    val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
    a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
    val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
    a_sizes_set_interm.suggestName("a_sizes_set_interm")
    when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
      a_set_wo_ready := UIntToOH(bundle.a.bits.source)
    }
    // On an accepted first beat, record opcode/size (shifted into the slot
    // for this source, with the low "set" bit) and mark the source in flight.
    when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
      a_set := UIntToOH(bundle.a.bits.source)
      a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
      a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
      a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
      a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
      monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
    }
    val d_clr = WireInit(0.U(edge.client.endSourceId.W))
    val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
    d_clr.suggestName("d_clr")
    d_clr_wo_ready.suggestName("d_clr_wo_ready")
    val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
    d_opcodes_clr.suggestName("d_opcodes_clr")
    val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
    d_sizes_clr.suggestName("d_sizes_clr")
    // ReleaseAck responds to C, not A, so it is excluded from A-side tracking.
    val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
    when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
      d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
    }
    when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
      d_clr := UIntToOH(bundle.d.bits.source)
      d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
      d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
    }
    // A response must match something in flight, or an A request being
    // accepted in this very cycle with the same source ID.
    when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
      val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
      assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
      when (same_cycle_resp) {
        assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
               (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
        assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
      } .otherwise {
        assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
               (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
        assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
      }
    }
    when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
      assume((!bundle.d.ready) || bundle.a.ready, "ready check")
    }
    if (edge.manager.minLatency > 0) {
      assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
    }
    inflight := (inflight | a_set) & ~d_clr
    inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
    inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
    // Watchdog: resets on any A/D activity; fires after `limit` idle cycles
    // while transactions are still outstanding (limit 0 disables the check).
    val watchdog = RegInit(0.U(32.W))
    val limit = PlusArg("tilelink_timeout",
      docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
    monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
    watchdog := watchdog + 1.U
    when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
  }
  /** C=>D (Release => ReleaseAck) source-ID tracker, mirroring
    * legalizeADSource: per-source in-flight bit plus recorded opcode/size
    * packed into wide registers, each slot tagged with a low "set" bit.
    * Only ReleaseAck responses participate on the D side.
    */
  def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
    val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
    val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
    val log_c_size_bus_size = log2Ceil(c_size_bus_size)
    def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
    val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
    val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
    val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
    inflight.suggestName("inflight")
    inflight_opcodes.suggestName("inflight_opcodes")
    inflight_sizes.suggestName("inflight_sizes")
    val c_first = edge.first(bundle.c.bits, bundle.c.fire)
    val d_first = edge.first(bundle.d.bits, bundle.d.fire)
    c_first.suggestName("c_first")
    d_first.suggestName("d_first")
    // One-hot set masks from the current C beat; *_wo_ready variants trigger
    // on valid alone (used for the minLatency check).
    val c_set = WireInit(0.U(edge.client.endSourceId.W))
    val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
    val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
    val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
    c_set.suggestName("c_set")
    c_set_wo_ready.suggestName("c_set_wo_ready")
    c_opcodes_set.suggestName("c_opcodes_set")
    c_sizes_set.suggestName("c_sizes_set")
    // Recover the recorded opcode/size for the responding source; the slot's
    // low bit is the "set" flag, hence the final >> 1.
    val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
    val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
    c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
    c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
    c_opcode_lookup.suggestName("c_opcode_lookup")
    c_size_lookup.suggestName("c_size_lookup")
    val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
    val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
    c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
    c_sizes_set_interm.suggestName("c_sizes_set_interm")
    when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
      c_set_wo_ready := UIntToOH(bundle.c.bits.source)
    }
    // On an accepted first beat, record opcode/size (shifted into this
    // source's slot, with the low "set" bit) and mark the source in flight.
    when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
      c_set := UIntToOH(bundle.c.bits.source)
      c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
      c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
      c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
      c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
      monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
    }
    // ProbeAck(Data) answers a B probe and gets no D response.
    val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
    val d_clr = WireInit(0.U(edge.client.endSourceId.W))
    val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
    val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
    val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
    d_clr.suggestName("d_clr")
    d_clr_wo_ready.suggestName("d_clr_wo_ready")
    d_opcodes_clr.suggestName("d_opcodes_clr")
    d_sizes_clr.suggestName("d_sizes_clr")
    val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
    when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
      d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
    }
    when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
      d_clr := UIntToOH(bundle.d.bits.source)
      d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
      d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
    }
    // A ReleaseAck must match something in flight, or a C request being
    // accepted in this very cycle with the same source ID.
    when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
      val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
      assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
      when (same_cycle_resp) {
        assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
      } .otherwise {
        assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
      }
    }
    when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
      assume((!bundle.d.ready) || bundle.c.ready, "ready check")
    }
    if (edge.manager.minLatency > 0) {
      when (c_set_wo_ready.orR) {
        assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
      }
    }
    inflight := (inflight | c_set) & ~d_clr
    inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
    inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
    // Watchdog: resets on any C/D activity; fires after `limit` idle cycles
    // while transactions are still outstanding (limit 0 disables the check).
    val watchdog = RegInit(0.U(32.W))
    val limit = PlusArg("tilelink_timeout",
      docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
    monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
    watchdog := watchdog + 1.U
    when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
  }
  /** D=>E sink-ID tracker: a Grant(Data) on D allocates a sink ID; the
    * matching GrantAck on E must release it.  E is always single-beat,
    * hence e_first is constant true.
    */
  def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
    val inflight = RegInit(0.U(edge.manager.endSinkId.W))
    val d_first = edge.first(bundle.d.bits, bundle.d.fire)
    val e_first = true.B
    val d_set = WireInit(0.U(edge.manager.endSinkId.W))
    when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
      d_set := UIntToOH(bundle.d.bits.sink)
      assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
    }
    val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
    when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
      e_clr := UIntToOH(bundle.e.bits.sink)
      monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
    }
    // edge.client.minLatency applies to BC, not DE
    inflight := (inflight | d_set) & ~e_clr
  }
  /** Instantiate ID-uniqueness trackers, but only when the required state is
    * small enough to synthesize; otherwise print a warning and skip.  The
    * simulation/formal split picks the appropriate tracker variant.
    */
  def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
    val sourceBits = log2Ceil(edge.client.endSourceId)
    val tooBig = 14 // >16kB worth of flight information gets to be too much
    if (sourceBits > tooBig) {
      println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
    } else {
      if (args.edge.params(TestplanTestType).simulation) {
        if (args.edge.params(TLMonitorStrictMode)) {
          legalizeADSource(bundle, edge)
          legalizeCDSource(bundle, edge)
        } else {
          legalizeADSourceOld(bundle, edge)
        }
      }
      if (args.edge.params(TestplanTestType).formal) {
        legalizeADSourceFormal(bundle, edge)
      }
    }
    if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
      // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
      val sinkBits = log2Ceil(edge.manager.endSinkId)
      if (sinkBits > tooBig) {
        println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
      } else {
        legalizeDESink(bundle, edge)
      }
    }
  }
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
/** Legacy PlusArg metadata (default value plus help text); superseded by the
  * typed [[PlusArgContainer]].
  */
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
  *
  * Internal record held by [[PlusArgArtefacts]] for each registered PlusArg.
  *
  * @tparam A scala type of the PlusArg value
  * @param default optional default value
  * @param docstring text to include in the help
  * @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
  */
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
  *
  * Instances live in [[Doctypes]] and are resolved implicitly by
  * `PlusArgArtefacts.append`.
  *
  * @tparam A some type
  */
trait Doctypeable[A] {
  /** Return the doctype string for some option */
  def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass.
  *
  * Implicit definitions carry explicit result types: inferred types on public
  * implicits are deprecated in Scala 2.13 (-Xsource:3) and illegal in Scala 3.
  */
object Doctypes {
  /** Converts an Int => "INT" */
  implicit val intToDoctype: Doctypeable[Int] = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
  /** Converts a BigInt => "INT" */
  implicit val bigIntToDoctype: Doctypeable[BigInt] = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
  /** Converts a String => "STRING" */
  implicit val stringToDoctype: Doctypeable[String] = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
/** Verilog black box (vsrc/plusarg_reader.v) that reads a `+name=value`
  * simulation plusarg and exposes it as a hardware output.
  *
  * @param format printf-style format string naming the plusarg (e.g. "foo=%d")
  * @param default value driven on `out` when the plusarg is not supplied
  * @param docstring help text recorded for documentation; not used in hardware
  * @param width bit width of the `out` port
  */
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
  "FORMAT" -> StringParam(format),
  "DEFAULT" -> IntParam(default),
  "WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
  val io = IO(new Bundle {
    val out = Output(UInt(width.W))
  })
  addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
/** Asserts that `io.count` stays below a plusarg-supplied maximum; a maximum
  * of 0 (the plusarg default when unset) disables the check entirely.
  */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
  val io = IO(new Bundle {
    val count = Input(UInt(width.W))
  })
  val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
  when (max > 0.U) {
    assert (io.count < max, s"Timeout exceeded: $docstring")
  }
}
import Doctypes._
object PlusArg
{
  /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
    * Do not use this as an initial register value. The value is set in an
    * initial block and thus accessing it from another initial is racey.
    * Add a docstring to document the arg, which can be dumped in an elaboration
    * pass.
    */
  def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
    // Register the arg for help-text generation, then instantiate the reader.
    PlusArgArtefacts.append(name, Some(default), docstring)
    Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
  }
  /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
    * to kill the simulation when count exceeds the specified integer argument.
    * Default 0 will never assert.
    */
  def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
    PlusArgArtefacts.append(name, Some(default), docstring)
    Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
  }
}
/** Elaboration-time registry of all PlusArgs, used to emit C-header help text
  * and a plusarg name array for the emulator.
  */
object PlusArgArtefacts {
  // NOTE: mutable elaboration-time state; populated as modules register args.
  private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
  /* Add a new PlusArg */
  @deprecated(
    "Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
    "Rocket Chip 2020.05"
  )
  def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
  /** Add a new PlusArg
    *
    * @tparam A scala type of the PlusArg value
    * @param name name for the PlusArg
    * @param default optional default value
    * @param docstring text to include in the help
    */
  def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
    artefacts = artefacts ++
      Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
  /* From plus args, generate help text */
  // The doubled backslashes below are deliberate: the output is itself a C
  // string literal with line continuations, so escapes must survive one level.
  private def serializeHelp_cHeader(tab: String = ""): String = artefacts
    .map{ case(arg, info) =>
      s"""|$tab+$arg=${info.doctype}\\n\\
          |$tab${" "*20}${info.docstring}\\n\\
          |""".stripMargin ++ info.default.map{ case default =>
        s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
    }.toSeq.mkString("\\n\\\n") ++ "\""
  /* From plus args, generate a char array of their names */
  private def serializeArray_cHeader(tab: String = ""): String = {
    val prettyTab = tab + " " * 44 // Length of 'static const ...'
    s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
      artefacts
        .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
        .mkString("")++
      s"${prettyTab}0};"
  }
  /* Generate C code to be included in emulator.cc that helps with
   * argument parsing based on available Verilog PlusArgs */
  def serialize_cHeader(): String =
    s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
        |${serializeHelp_cHeader(" "*7)}
        |${serializeArray_cHeader()}
        |""".stripMargin
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
  /** True when `address` is naturally aligned for an access of size
    * 2^lgSize, i.e. the low lgSize address bits are all zero.  When the edge
    * allows only single-byte accesses (maxLgSize == 0) this is trivially true.
    */
  def isAligned(address: UInt, lgSize: UInt): Bool = {
    if (maxLgSize == 0) true.B else {
      val mask = UIntToOH1(lgSize, maxLgSize)
      (address & mask) === 0.U
    }
  }
  /** Byte-lane mask (one bit per beat byte) for an access of size 2^lgSize
    * starting at `address`.
    */
  def mask(address: UInt, lgSize: UInt): UInt =
    MaskGen(address, lgSize, manager.beatBytes)
  /** Statically determine, from the edge's declared capabilities, whether a
    * channel's messages always (Some(true)), never (Some(false)), or only
    * sometimes (None) carry data.  Used to constant-fold `hasData`.
    */
  def staticHasData(bundle: TLChannel): Option[Boolean] = {
    bundle match {
      case _:TLBundleA => {
        // Do there exist A messages with Data?
        val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
        // Do there exist A messages without Data?
        val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
      }
      case _:TLBundleB => {
        // Do there exist B messages with Data?
        val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
        // Do there exist B messages without Data?
        val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
      }
      case _:TLBundleC => {
        // Do there exist C messages with Data?
        val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
        // Do there exist C messages without Data?
        val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
        if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
      }
      case _:TLBundleD => {
        // Do there exist D messages with Data?
        val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
        // Do there exist D messages without Data?
        val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
        if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
      }
      // 'E' messages (GrantAck) never carry data.
      case _:TLBundleE => Some(false)
    }
  }
  /** Whether this beat is a request (expects a later response): all A and B
    * messages are; C only for Release(Data); D only for Grant(Data); E never.
    * C/D cases decode directly from opcode bits to avoid a full comparison.
    */
  def isRequest(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => true.B
      case b: TLBundleB => true.B
      case c: TLBundleC => c.opcode(2) && c.opcode(1)
      //    opcode === TLMessages.Release ||
      //    opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(2) && !d.opcode(1)
      //    opcode === TLMessages.Grant     ||
      //    opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
  }
/** Whether this beat answers an earlier request.
  * A and B beats never do; C does for everything except Release/ReleaseData;
  * D and E beats always do (Grant is simultaneously a response and a request).
  */
def isResponse(x: TLChannel): Bool = {
  x match {
    case a: TLBundleA => false.B
    case b: TLBundleB => false.B
    case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
    //    opcode =/= TLMessages.Release &&
    //    opcode =/= TLMessages.ReleaseData
    case d: TLBundleD => true.B // Grant isResponse + isRequest
    case e: TLBundleE => true.B
  }
}
/** Whether this beat carries a data payload.
  * Decodes the opcode per channel, but first consults `staticHasData` so the
  * check collapses to a constant whenever the edge's parameters allow it.
  */
def hasData(x: TLChannel): Bool = {
  val opdata = x match {
    case a: TLBundleA => !a.opcode(2)
    //    opcode === TLMessages.PutFullData    ||
    //    opcode === TLMessages.PutPartialData ||
    //    opcode === TLMessages.ArithmeticData ||
    //    opcode === TLMessages.LogicalData
    case b: TLBundleB => !b.opcode(2)
    //    opcode === TLMessages.PutFullData    ||
    //    opcode === TLMessages.PutPartialData ||
    //    opcode === TLMessages.ArithmeticData ||
    //    opcode === TLMessages.LogicalData
    case c: TLBundleC => c.opcode(0)
    //    opcode === TLMessages.AccessAckData ||
    //    opcode === TLMessages.ProbeAckData  ||
    //    opcode === TLMessages.ReleaseData
    case d: TLBundleD => d.opcode(0)
    //    opcode === TLMessages.AccessAckData ||
    //    opcode === TLMessages.GrantData
    case e: TLBundleE => false.B
  }
  staticHasData(x).map(_.B).getOrElse(opdata)
}
/** Opcode field of a data channel (A, B, C, or D) beat. */
def opcode(x: TLDataChannel): UInt = x match {
  case a: TLBundleA => a.opcode
  case b: TLBundleB => b.opcode
  case c: TLBundleC => c.opcode
  case d: TLBundleD => d.opcode
}
/** Param field of a data channel (A, B, C, or D) beat. */
def param(x: TLDataChannel): UInt = x match {
  case a: TLBundleA => a.param
  case b: TLBundleB => b.param
  case c: TLBundleC => c.param
  case d: TLBundleD => d.param
}
/** Size (log2 of transfer bytes) field of a data channel beat. */
def size(x: TLDataChannel): UInt = x match {
  case a: TLBundleA => a.size
  case b: TLBundleB => b.size
  case c: TLBundleC => c.size
  case d: TLBundleD => d.size
}
/** Data payload field of a data channel beat. */
def data(x: TLDataChannel): UInt = x match {
  case a: TLBundleA => a.data
  case b: TLBundleB => b.data
  case c: TLBundleC => c.data
  case d: TLBundleD => d.data
}
/** Corrupt flag of a data channel beat. */
def corrupt(x: TLDataChannel): Bool = x match {
  case a: TLBundleA => a.corrupt
  case b: TLBundleB => b.corrupt
  case c: TLBundleC => c.corrupt
  case d: TLBundleD => d.corrupt
}
/** Byte-lane mask of an address channel beat. The C channel carries no mask
  * field, so its mask is recomputed from the address and size. */
def mask(x: TLAddrChannel): UInt = x match {
  case a: TLBundleA => a.mask
  case b: TLBundleB => b.mask
  case c: TLBundleC => mask(c.address, c.size)
}
/** Full (recomputed) byte-lane mask of an address channel beat, ignoring any
  * partial mask the bundle itself carries. */
def full_mask(x: TLAddrChannel): UInt = x match {
  case a: TLBundleA => mask(a.address, a.size)
  case b: TLBundleB => mask(b.address, b.size)
  case c: TLBundleC => mask(c.address, c.size)
}
/** Address field of an address channel (A, B, or C) beat. */
def address(x: TLAddrChannel): UInt = x match {
  case a: TLBundleA => a.address
  case b: TLBundleB => b.address
  case c: TLBundleC => c.address
}
/** Source-ID field of a data channel beat. */
def source(x: TLDataChannel): UInt = x match {
  case a: TLBundleA => a.source
  case b: TLBundleB => b.source
  case c: TLBundleC => c.source
  case d: TLBundleD => d.source
}
/** Beat-index portion of an address (address divided by beatBytes). */
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
/** Byte-offset-within-beat portion of an address (0 when a beat is one byte). */
def addr_lo(x: UInt): UInt =
  if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
// Overloads operating directly on an address channel beat.
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
/** Number of beats in the message containing this beat (1 for data-less messages).
  * For data-bearing messages the size field is decoded one-hot; transfers no
  * larger than one beat are clamped to a single beat via `small`.
  */
def numBeats(x: TLChannel): UInt = {
  x match {
    case _: TLBundleE => 1.U // GrantAck is always a single beat
    case bundle: TLDataChannel => {
      val hasData = this.hasData(bundle)
      val size = this.size(bundle)
      val cutoff = log2Ceil(manager.beatBytes)
      // If the edge can never exceed one beat, `small` is constant-true
      val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
      val decode = UIntToOH(size, maxLgSize+1) >> cutoff
      Mux(hasData, decode | small.asUInt, 1.U)
    }
  }
}
/** Number of beats minus one (0 for single-beat or data-less messages).
  * This is the natural initial value for the down-counter in `firstlastHelper`.
  */
def numBeats1(x: TLChannel): UInt = {
  x match {
    case _: TLBundleE => 0.U
    case bundle: TLDataChannel => {
      if (maxLgSize == 0) {
        // Only single-beat transfers are possible on this edge
        0.U
      } else {
        val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
        Mux(hasData(bundle), decode, 0.U)
      }
    }
  }
}
/** Tracks the beat position within a (possibly multi-beat) message on `bits`.
  * Elaborates a down-counter register that advances whenever `fire` is asserted.
  * @param bits the channel beat whose size determines the beat count
  * @param fire asserted on each accepted beat
  * @return (is-first-beat, is-last-beat, last-beat-and-fired, current beat index)
  */
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
  val beats1 = numBeats1(bits)
  val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
  val counter1 = counter - 1.U
  val first = counter === 0.U
  val last = counter === 1.U || beats1 === 0.U
  val done = last && fire
  // Beat index counts up as the counter counts down from beats1
  val count = (beats1 & ~counter1)
  when (fire) {
    counter := Mux(first, beats1, counter1)
  }
  (first, last, done, count)
}
// Convenience wrappers around firstlastHelper.
// NOTE: each call elaborates its own beat counter, so invoke only one of these
// per channel (or use firstlast/count to share a single counter).
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
/** (first, last, done) for a channel, sharing a single beat counter. */
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
  val r = firstlastHelper(bits, fire)
  (r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
/** (first, last, done, beat index) for a channel, sharing a single beat counter. */
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
  val r = firstlastHelper(bits, fire)
  (r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
/** (first, last, done, byte offset of the current beat within the transfer).
  * The beat index is scaled to bytes by shifting by log2(beatBytes). */
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
  val r = firstlastHelper(bits, fire)
  (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
/** Whether A-channel request `a` requires Trunk (write) permission at its target.
  * All Put/atomic opcodes do; Get does not; Hint depends on the prefetch kind;
  * Acquires depend on the requested permission growth. Opcode/param values not
  * listed elaborate as DontCare (they are illegal on a well-formed edge).
  */
def needT(a: TLBundleA): Bool = {
  val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
    TLPermissions.NtoB -> false.B,
    TLPermissions.NtoT -> true.B,
    TLPermissions.BtoT -> true.B))
  MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
    TLMessages.PutFullData    -> true.B,
    TLMessages.PutPartialData -> true.B,
    TLMessages.ArithmeticData -> true.B,
    TLMessages.LogicalData    -> true.B,
    TLMessages.Get            -> false.B,
    TLMessages.Hint           -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
      TLHints.PREFETCH_READ  -> false.B,
      TLHints.PREFETCH_WRITE -> true.B)),
    TLMessages.AcquireBlock -> acq_needT,
    TLMessages.AcquirePerm  -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
/** Counts transactions currently in flight on TileLink port `x`.
  * Increments when the first beat of a request fires, decrements when the last
  * beat of a response fires. B/C/E channels are only counted when the edge can
  * actually carry them (`bce`).
  * @return (registered in-flight count, combinational next-cycle count)
  */
def inFlight(x: TLBundle): (UInt, UInt) = {
  // Up to 3 transactions (A->D, B->C, D->E legs) can be outstanding per source ID
  val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
  val bce = manager.anySupportAcquireB && client.anySupportProbe
  val (a_first, a_last, _) = firstlast(x.a)
  val (b_first, b_last, _) = firstlast(x.b)
  val (c_first, c_last, _) = firstlast(x.c)
  val (d_first, d_last, _) = firstlast(x.d)
  val (e_first, e_last, _) = firstlast(x.e)
  val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
  val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
  val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
  val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
  val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
  val a_inc = x.a.fire && a_first && a_request
  val b_inc = x.b.fire && b_first && b_request
  val c_inc = x.c.fire && c_first && c_request
  val d_inc = x.d.fire && d_first && d_request
  val e_inc = x.e.fire && e_first && e_request
  val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
  val a_dec = x.a.fire && a_last && a_response
  val b_dec = x.b.fire && b_last && b_response
  val c_dec = x.c.fire && c_last && c_response
  val d_dec = x.d.fire && d_last && d_response
  val e_dec = x.e.fire && e_last && e_response
  val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
  val next_flight = flight + PopCount(inc) - PopCount(dec)
  flight := next_flight
  (flight, next_flight)
}
/** Human-readable table of this edge's source-ID assignments, for debug output. */
def prettySourceMapping(context: String): String = {
  s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
/** Edge viewed from a client (master) port: constructors for the messages a
  * client may emit (A and C channel requests/responses, E acknowledgements).
  * Message constructors return `(legal, bundle)` where `legal` indicates that
  * the addressed manager supports the operation at the given size; pure
  * response constructors return just the bundle.
  */
class TLEdgeOut(
  client:  TLClientPortParameters,
  manager: TLManagerPortParameters,
  params:  Parameters,
  sourceInfo: SourceInfo)
  extends TLEdge(client, manager, params, sourceInfo)
{
  // Transfers

  /** A-channel AcquireBlock: request a copy of a cache block with permission growth. */
  def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
    require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsAcquireBFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode  := TLMessages.AcquireBlock
    a.param   := growPermissions
    a.size    := lgSize
    a.source  := fromSource
    a.address := toAddress
    a.user    := DontCare
    a.echo    := DontCare
    a.mask    := mask(toAddress, lgSize)
    a.data    := DontCare
    a.corrupt := false.B
    (legal, a)
  }

  /** A-channel AcquirePerm: request permissions only, without the data. */
  def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
    require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsAcquireBFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode  := TLMessages.AcquirePerm
    a.param   := growPermissions
    a.size    := lgSize
    a.source  := fromSource
    a.address := toAddress
    a.user    := DontCare
    a.echo    := DontCare
    a.mask    := mask(toAddress, lgSize)
    a.data    := DontCare
    a.corrupt := false.B
    (legal, a)
  }

  /** C-channel Release: voluntarily shrink permissions without writing data back. */
  def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
    require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsAcquireBFast(toAddress, lgSize)
    val c = Wire(new TLBundleC(bundle))
    c.opcode  := TLMessages.Release
    c.param   := shrinkPermissions
    c.size    := lgSize
    c.source  := fromSource
    c.address := toAddress
    c.user    := DontCare
    c.echo    := DontCare
    c.data    := DontCare
    c.corrupt := false.B
    (legal, c)
  }

  /** C-channel ReleaseData: shrink permissions and write dirty data back. */
  def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
    require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsAcquireBFast(toAddress, lgSize)
    val c = Wire(new TLBundleC(bundle))
    c.opcode  := TLMessages.ReleaseData
    c.param   := shrinkPermissions
    c.size    := lgSize
    c.source  := fromSource
    c.address := toAddress
    c.user    := DontCare
    c.echo    := DontCare
    c.data    := data
    c.corrupt := corrupt
    (legal, c)
  }

  /** ReleaseData with corrupt defaulted to false. */
  def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
    Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)

  /** C-channel ProbeAck answering Probe `b` (fields copied from the probe). */
  def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
    ProbeAck(b.source, b.address, b.size, reportPermissions)

  /** C-channel ProbeAck without data. */
  def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode  := TLMessages.ProbeAck
    c.param   := reportPermissions
    c.size    := lgSize
    c.source  := fromSource
    c.address := toAddress
    c.user    := DontCare
    c.echo    := DontCare
    c.data    := DontCare
    c.corrupt := false.B
    c
  }

  /** C-channel ProbeAckData answering Probe `b` with writeback data. */
  def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
    ProbeAck(b.source, b.address, b.size, reportPermissions, data)

  /** C-channel ProbeAckData carrying dirty data. */
  def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode  := TLMessages.ProbeAckData
    c.param   := reportPermissions
    c.size    := lgSize
    c.source  := fromSource
    c.address := toAddress
    c.user    := DontCare
    c.echo    := DontCare
    c.data    := data
    c.corrupt := corrupt
    c
  }

  /** ProbeAckData with corrupt defaulted to false. */
  def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
    ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)

  /** E-channel GrantAck acknowledging Grant `d`. */
  def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
  def GrantAck(toSink: UInt): TLBundleE = {
    val e = Wire(new TLBundleE(bundle))
    e.sink := toSink
    e
  }

  // Accesses

  /** A-channel Get: read `2**lgSize` bytes. */
  def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
    require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsGetFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode  := TLMessages.Get
    a.param   := 0.U
    a.size    := lgSize
    a.source  := fromSource
    a.address := toAddress
    a.user    := DontCare
    a.echo    := DontCare
    a.mask    := mask(toAddress, lgSize)
    a.data    := DontCare
    a.corrupt := false.B
    (legal, a)
  }

  /** A-channel PutFullData (all byte lanes written). */
  def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
    Put(fromSource, toAddress, lgSize, data, false.B)
  def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
    require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsPutFullFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode  := TLMessages.PutFullData
    a.param   := 0.U
    a.size    := lgSize
    a.source  := fromSource
    a.address := toAddress
    a.user    := DontCare
    a.echo    := DontCare
    a.mask    := mask(toAddress, lgSize)
    a.data    := data
    a.corrupt := corrupt
    (legal, a)
  }

  /** A-channel PutPartialData with an explicit byte-lane mask. */
  def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
    Put(fromSource, toAddress, lgSize, data, mask, false.B)
  def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
    require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsPutPartialFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode  := TLMessages.PutPartialData
    a.param   := 0.U
    a.size    := lgSize
    a.source  := fromSource
    a.address := toAddress
    a.user    := DontCare
    a.echo    := DontCare
    a.mask    := mask
    a.data    := data
    a.corrupt := corrupt
    (legal, a)
  }

  /** A-channel ArithmeticData atomic (`atomic` selects the operation). */
  def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
    require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsArithmeticFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode  := TLMessages.ArithmeticData
    a.param   := atomic
    a.size    := lgSize
    a.source  := fromSource
    a.address := toAddress
    a.user    := DontCare
    a.echo    := DontCare
    a.mask    := mask(toAddress, lgSize)
    a.data    := data
    a.corrupt := corrupt
    (legal, a)
  }

  /** A-channel LogicalData atomic (`atomic` selects the operation). */
  def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
    require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsLogicalFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode  := TLMessages.LogicalData
    a.param   := atomic
    a.size    := lgSize
    a.source  := fromSource
    a.address := toAddress
    a.user    := DontCare
    a.echo    := DontCare
    a.mask    := mask(toAddress, lgSize)
    a.data    := data
    a.corrupt := corrupt
    (legal, a)
  }

  /** A-channel Hint (prefetch advice; `param` encodes the hint kind). */
  def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
    require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsHintFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode  := TLMessages.Hint
    a.param   := param
    a.size    := lgSize
    a.source  := fromSource
    a.address := toAddress
    a.user    := DontCare
    a.echo    := DontCare
    a.mask    := mask(toAddress, lgSize)
    a.data    := DontCare
    a.corrupt := false.B
    (legal, a)
  }

  /** C-channel AccessAck answering B-channel access `b` (no data). */
  def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
  def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode  := TLMessages.AccessAck
    c.param   := 0.U
    c.size    := lgSize
    c.source  := fromSource
    c.address := toAddress
    c.user    := DontCare
    c.echo    := DontCare
    c.data    := DontCare
    c.corrupt := false.B
    c
  }

  /** C-channel AccessAckData answering a B-channel read with data. */
  def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
  def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
  def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
  def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode  := TLMessages.AccessAckData
    c.param   := 0.U
    c.size    := lgSize
    c.source  := fromSource
    c.address := toAddress
    c.user    := DontCare
    c.echo    := DontCare
    c.data    := data
    c.corrupt := corrupt
    c
  }

  /** C-channel HintAck answering B-channel Hint `b`. */
  def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
  def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode  := TLMessages.HintAck
    c.param   := 0.U
    c.size    := lgSize
    c.source  := fromSource
    c.address := toAddress
    c.user    := DontCare
    c.echo    := DontCare
    c.data    := DontCare
    c.corrupt := false.B
    c
  }
}
/** Edge viewed from a manager (slave) port: constructors for the messages a
  * manager may emit (B channel requests, D channel responses).
  * Request constructors return `(legal, bundle)` where `legal` indicates the
  * addressed client supports the operation; responses return just the bundle.
  */
class TLEdgeIn(
  client:  TLClientPortParameters,
  manager: TLManagerPortParameters,
  params:  Parameters,
  sourceInfo: SourceInfo)
  extends TLEdge(client, manager, params, sourceInfo)
{
  /** Transposes a ragged Seq-of-Seqs, dropping exhausted rows (unlike
    * stdlib `transpose`, which requires uniform lengths). */
  private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
    val todo = x.filter(!_.isEmpty)
    val heads = todo.map(_.head)
    val tails = todo.map(_.tail)
    if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
  }

  // Transfers

  /** B-channel Probe: ask a client to reduce its permissions on a block. */
  def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
    require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
    val legal = client.supportsProbe(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode  := TLMessages.Probe
    b.param   := capPermissions
    b.size    := lgSize
    b.source  := toSource
    b.address := fromAddress
    b.mask    := mask(fromAddress, lgSize)
    b.data    := DontCare
    b.corrupt := false.B
    (legal, b)
  }

  /** D-channel Grant (no data) completing an Acquire. */
  def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
  def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode  := TLMessages.Grant
    d.param   := capPermissions
    d.size    := lgSize
    d.source  := toSource
    d.sink    := fromSink
    d.denied  := denied
    d.user    := DontCare
    d.echo    := DontCare
    d.data    := DontCare
    d.corrupt := false.B
    d
  }

  /** D-channel GrantData completing an AcquireBlock with the block's data. */
  def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
  def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode  := TLMessages.GrantData
    d.param   := capPermissions
    d.size    := lgSize
    d.source  := toSource
    d.sink    := fromSink
    d.denied  := denied
    d.user    := DontCare
    d.echo    := DontCare
    d.data    := data
    d.corrupt := corrupt
    d
  }

  /** D-channel ReleaseAck completing Release `c` (never denied). */
  def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
  def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode  := TLMessages.ReleaseAck
    d.param   := 0.U
    d.size    := lgSize
    d.source  := toSource
    d.sink    := 0.U
    d.denied  := denied
    d.user    := DontCare
    d.echo    := DontCare
    d.data    := DontCare
    d.corrupt := false.B
    d
  }

  // Accesses

  /** B-channel Get forwarded toward a client. */
  def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
    require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
    val legal = client.supportsGet(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode  := TLMessages.Get
    b.param   := 0.U
    b.size    := lgSize
    b.source  := toSource
    b.address := fromAddress
    b.mask    := mask(fromAddress, lgSize)
    b.data    := DontCare
    b.corrupt := false.B
    (legal, b)
  }

  /** B-channel PutFullData (all byte lanes written). */
  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
    Put(fromAddress, toSource, lgSize, data, false.B)
  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
    require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
    val legal = client.supportsPutFull(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode  := TLMessages.PutFullData
    b.param   := 0.U
    b.size    := lgSize
    b.source  := toSource
    b.address := fromAddress
    b.mask    := mask(fromAddress, lgSize)
    b.data    := data
    b.corrupt := corrupt
    (legal, b)
  }

  /** B-channel PutPartialData with an explicit byte-lane mask. */
  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
    Put(fromAddress, toSource, lgSize, data, mask, false.B)
  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
    require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsPutPartial(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode  := TLMessages.PutPartialData
    b.param   := 0.U
    b.size    := lgSize
    b.source  := toSource
    b.address := fromAddress
    b.mask    := mask
    b.data    := data
    b.corrupt := corrupt
    (legal, b)
  }

  /** B-channel ArithmeticData atomic. */
  def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
    require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsArithmetic(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode  := TLMessages.ArithmeticData
    b.param   := atomic
    b.size    := lgSize
    b.source  := toSource
    b.address := fromAddress
    b.mask    := mask(fromAddress, lgSize)
    b.data    := data
    b.corrupt := corrupt
    (legal, b)
  }

  /** B-channel LogicalData atomic. */
  def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
    require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsLogical(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode  := TLMessages.LogicalData
    b.param   := atomic
    b.size    := lgSize
    b.source  := toSource
    b.address := fromAddress
    b.mask    := mask(fromAddress, lgSize)
    b.data    := data
    b.corrupt := corrupt
    (legal, b)
  }

  /** B-channel Hint forwarded toward a client. */
  def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
    require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsHint(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode  := TLMessages.Hint
    b.param   := param
    b.size    := lgSize
    b.source  := toSource
    b.address := fromAddress
    b.mask    := mask(fromAddress, lgSize)
    b.data    := DontCare
    b.corrupt := false.B
    (legal, b)
  }

  /** D-channel AccessAck answering A-channel access `a` (no data). */
  def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
  def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
  def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
  def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode  := TLMessages.AccessAck
    d.param   := 0.U
    d.size    := lgSize
    d.source  := toSource
    d.sink    := 0.U
    d.denied  := denied
    d.user    := DontCare
    d.echo    := DontCare
    d.data    := DontCare
    d.corrupt := false.B
    d
  }

  /** D-channel AccessAckData answering an A-channel read with data. */
  def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
  def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
  def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
  def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode  := TLMessages.AccessAckData
    d.param   := 0.U
    d.size    := lgSize
    d.source  := toSource
    d.sink    := 0.U
    d.denied  := denied
    d.user    := DontCare
    d.echo    := DontCare
    d.data    := data
    d.corrupt := corrupt
    d
  }

  /** D-channel HintAck answering A-channel Hint `a`. */
  def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
  def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
  def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
  def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode  := TLMessages.HintAck
    d.param   := 0.U
    d.size    := lgSize
    d.source  := toSource
    d.sink    := 0.U
    d.denied  := denied
    d.user    := DontCare
    d.echo    := DontCare
    d.data    := DontCare
    d.corrupt := false.B
    d
  }
}
// Auto-generated TileLink protocol monitor (elaborated from the Chisel TLMonitor).
// Observes A/D channel handshakes and tracks in-flight transactions.
// NOTE(review): this chunk appears truncated — no `endmodule` (or the logic driving
// these registers) is visible here; confirm against the full generated file.
module TLMonitor_105( // @[Monitor.scala:36:7]
  input clock, // @[Monitor.scala:36:7]
  input reset, // @[Monitor.scala:36:7]
  input io_in_a_ready, // @[Monitor.scala:20:14]
  input io_in_a_valid, // @[Monitor.scala:20:14]
  input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
  input [6:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
  input io_in_d_ready, // @[Monitor.scala:20:14]
  input io_in_d_valid, // @[Monitor.scala:20:14]
  input [2:0] io_in_d_bits_opcode // @[Monitor.scala:20:14]
);

  wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
  wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
  // A-channel handshake (ready && valid)
  wire a_first_done = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
  reg a_first_counter; // @[Edges.scala:229:27]
  reg [2:0] opcode; // @[Monitor.scala:387:22]
  reg [6:0] address; // @[Monitor.scala:391:22]
  reg d_first_counter; // @[Edges.scala:229:27]
  reg [2:0] opcode_1; // @[Monitor.scala:538:22]
  reg [1:0] inflight; // @[Monitor.scala:614:27]
  reg [3:0] inflight_opcodes; // @[Monitor.scala:616:35]
  reg [3:0] inflight_sizes; // @[Monitor.scala:618:33]
  reg a_first_counter_1; // @[Edges.scala:229:27]
  reg d_first_counter_1; // @[Edges.scala:229:27]
  wire a_set = a_first_done & ~a_first_counter_1; // @[Decoupled.scala:51:35]
  // D-channel opcode 6 is ReleaseAck
  wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
  wire _GEN = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
  reg [31:0] watchdog; // @[Monitor.scala:709:27]
  reg [1:0] inflight_1; // @[Monitor.scala:726:35]
  reg [3:0] inflight_sizes_1; // @[Monitor.scala:728:35]
  reg d_first_counter_2; // @[Edges.scala:229:27]
  reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files.
File Crossing.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.interrupts
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg}
@deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2")
class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val intnode = IntAdapterNode()
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) =>
out := SynchronizerShiftReg(in, sync)
}
}
}
/** Factory returning the node of a freshly instantiated [[IntSyncCrossingSource]]. */
object IntSyncCrossingSource
{
  def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) =
  {
    val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered))
    intsource.node
  }
}
/** Source side of an interrupt clock-domain crossing.
  * Registers the interrupt vector in an async-reset register (so the sink can
  * safely synchronize it), unless `alreadyRegistered` says the input is already
  * registered — in which case it passes straight through with no clock/reset.
  */
class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule
{
  val node = IntSyncSourceNode(alreadyRegistered)
  lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl)
  class Impl extends LazyModuleImp(this) {
    def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
    override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}"
    (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
      // Register under async reset so the captured value is stable for the sink
      out.sync := AsyncResetReg(Cat(in.reverse)).asBools
    }
  }
  // Raw (no implicit clock/reset) pass-through for pre-registered inputs
  class ImplRegistered extends LazyRawModuleImp(this) {
    def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
    override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered"
    (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
      out.sync := in
    }
  }
}
/** Deprecated factory: builds an async sink regardless of `sync`; prefer the
  * crossing-specific sinks named in the deprecation message. */
object IntSyncCrossingSink
{
  @deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2")
  def apply(sync: Int = 3)(implicit p: Parameters) =
  {
    val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
    intsink.node
  }
}
/** Sink side of an asynchronous interrupt crossing: re-synchronizes the
  * registered source value through a `sync`-deep shift register in the local
  * clock domain. */
class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
  val node = IntSyncSinkNode(sync)
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) {
    override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}"
    (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
      out := SynchronizerShiftReg(in.sync, sync)
    }
  }
}
/** Factory returning the node of a freshly instantiated [[IntSyncAsyncCrossingSink]]. */
object IntSyncAsyncCrossingSink
{
  def apply(sync: Int = 3)(implicit p: Parameters) =
  {
    val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
    intsink.node
  }
}
/** Sink for a synchronous (same-clock) crossing: a raw wire pass-through,
  * hence the LazyRawModuleImp with no clock or reset. */
class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule
{
  val node = IntSyncSinkNode(0)
  lazy val module = new Impl
  class Impl extends LazyRawModuleImp(this) {
    def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
    override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}"
    (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
      out := in.sync
    }
  }
}
/** Factory returning the node of a freshly instantiated [[IntSyncSyncCrossingSink]]. */
object IntSyncSyncCrossingSink
{
  def apply()(implicit p: Parameters) =
  {
    val intsink = LazyModule(new IntSyncSyncCrossingSink())
    intsink.node
  }
}
/** Sink for a rational (ratio-related clocks) crossing: a single register
  * stage in the sink clock domain suffices. */
class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule
{
  val node = IntSyncSinkNode(1)
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) {
    def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
    override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}"
    (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
      out := RegNext(in.sync)
    }
  }
}
/** Factory returning the node of a freshly instantiated [[IntSyncRationalCrossingSink]]. */
object IntSyncRationalCrossingSink
{
  def apply()(implicit p: Parameters) =
  {
    val intsink = LazyModule(new IntSyncRationalCrossingSink())
    intsink.node
  }
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
  /** Instantiate hardware of this `Module`.
    * As a `Module` (not `RawModule`), children elaborated here inherit this
    * module's implicit clock and reset.
    */
  val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
  // These wires are the default clock+reset for all LazyModule children.
  // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
  // [[LazyRawModuleImp]] children.
  // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
  /** drive clock explicitly. */
  val childClock: Clock = Wire(Clock())
  /** drive reset explicitly. */
  val childReset: Reset = Wire(Reset())
  // the default is that these are disabled: clock tied low, reset left unconnected.
  childClock := false.B.asClock
  childReset := chisel3.DontCare
  // Subclasses override this to `true` to elaborate children under childClock/childReset.
  def provideImplicitClockToLazyChildren: Boolean = false
  val (auto, dangles) =
    if (provideImplicitClockToLazyChildren) {
      withClockAndReset(childClock, childReset) { instantiate() }
    } else {
      instantiate()
    }
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
  /** Lexicographic order: first by `serial`, then by `index` (same ordering the
    * original Option-of-tuple comparison produced).
    */
  def compare(that: HalfEdge): Int = {
    val bySerial = this.serial compare that.serial
    if (bySerial != 0) bySerial else this.index compare that.index
  }
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
  // Unsafe accessor: throws NoSuchElementException when dataOpt is None
  // (i.e. for Dangles belonging to a cloned module); callers must ensure data exists.
  def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extends from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extends from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: source is process by a function and generate pass to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star"
* connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also
* need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
// Purely combinational pass-through elaborated from the Chisel
// IntSyncSyncCrossingSink (1 port x 1 bit, 0 sync stages): the output is the
// input wired straight through; the constant clock/reset wires are vestigial.
module IntSyncSyncCrossingSink_n1x1_11( // @[Crossing.scala:96:9]
  input  auto_in_sync_0, // @[LazyModuleImp.scala:107:25]
  output auto_out_0 // @[LazyModuleImp.scala:107:25]
);

  wire auto_in_sync_0_0 = auto_in_sync_0; // @[Crossing.scala:96:9]
  wire childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
  wire childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
  wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
  wire nodeIn_sync_0 = auto_in_sync_0_0; // @[Crossing.scala:96:9]
  wire nodeOut_0; // @[MixedNode.scala:542:17]
  wire auto_out_0_0; // @[Crossing.scala:96:9]
  assign nodeOut_0 = nodeIn_sync_0; // @[MixedNode.scala:542:17, :551:17]
  assign auto_out_0_0 = nodeOut_0; // @[Crossing.scala:96:9]
  assign auto_out_0 = auto_out_0_0; // @[Crossing.scala:96:9]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File Pipeline.scala:
package gemmini
import chisel3._
import chisel3.util._
/** A `latency`-deep decoupled pipeline with per-stage stall propagation.
  *
  * `comb` supplies `latency+1` combinational functions: `comb.head` is applied
  * to the input before the first stage register, `comb.last` to the final
  * register's output, and the functions in between at each stage boundary.
  *
  * NOTE: the register updates below rely on Chisel last-connect semantics —
  * later `when` blocks in source order override earlier assignments within the
  * same cycle — so the ordering of the blocks is significant.
  */
class Pipeline[T <: Data] (gen: T, latency: Int)(comb: Seq[T => T] = Seq.fill(latency+1)((x: T) => x)) extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(gen))
    val out = Decoupled(gen)
    val busy = Output(Bool())   // high while any stage holds data or input is offered
  })
  require(comb.size == latency+1, "length of combinational is incorrect")
  if (latency == 0) {
    // Zero-latency special case: a purely combinational pass-through.
    io.in.ready := io.out.ready
    io.out.valid := io.in.valid
    io.out.bits := comb.head(io.in.bits)
    io.busy := io.in.valid
  } else {
    val stages = Reg(Vec(latency, gen))
    val valids = RegInit(VecInit(Seq.fill(latency)(false.B)))
    val stalling = VecInit(Seq.fill(latency)(false.B))
    io.busy := io.in.valid || valids.reduce(_||_)
    // Stall signals
    io.in.ready := !stalling.head
    stalling.last := valids.last && !io.out.ready
    (stalling.init, stalling.tail, valids.init).zipped.foreach { case (s1, s2, v1) =>
      s1 := v1 && s2   // a stage stalls when it holds data and the stage ahead stalls
    }
    // Valid signals
    // When the pipeline stage ahead of you isn't stalling, then make yourself invalid
    io.out.valid := valids.last
    when(io.out.ready) {
      valids.last := false.B
    }
    (valids.init, stalling.tail).zipped.foreach { case (v1, s2) =>
      when(!s2) {
        v1 := false.B
      }
    }
    // When the pipeline stage behind you is valid then become true
    when(io.in.fire) {
      valids.head := true.B
    }
    (valids.tail, valids.init).zipped.foreach { case (v2, v1) =>
      when(v1) {
        v2 := true.B
      }
    }
    // Stages
    when(io.in.fire) {
      stages.head := comb.head(io.in.bits)
    }
    io.out.bits := comb.last(stages.last)
    ((stages.tail zip stages.init) zip (stalling.tail zip comb.tail.init)).foreach { case ((st2, st1), (s2, c1)) =>
      when(!s2) {
        st2 := c1(st1)   // advance data into a stage only while it is not stalled
      }
    }
  }
}
object Pipeline {
  /** Wrap `in` in a `latency`-stage pipeline applying `comb` at the stage boundaries. */
  def apply[T <: Data](in: ReadyValidIO[T], latency: Int, comb: Seq[T => T]): DecoupledIO[T] = {
    val pipe = Module(new Pipeline(in.bits.cloneType, latency)(comb))
    pipe.io.in <> in
    pipe.io.out
  }

  /** Wrap `in` in a `latency`-stage pipeline with identity logic at every boundary. */
  def apply[T <: Data](in: ReadyValidIO[T], latency: Int): DecoupledIO[T] = {
    val pipe = Module(new Pipeline(in.bits.cloneType, latency)())
    pipe.io.in <> in
    pipe.io.out
  }
}
| module Pipeline_11( // @[Pipeline.scala:6:7]
  input clock, // @[Pipeline.scala:6:7]
  input reset, // @[Pipeline.scala:6:7]
  output io_in_ready, // @[Pipeline.scala:7:14]
  input io_in_valid, // @[Pipeline.scala:7:14]
  input [6:0] io_in_bits_cmd_inst_funct, // @[Pipeline.scala:7:14]
  input [63:0] io_in_bits_cmd_rs1, // @[Pipeline.scala:7:14]
  input [63:0] io_in_bits_cmd_rs2, // @[Pipeline.scala:7:14]
  input [67:0] io_in_bits_dram_addr, // @[Pipeline.scala:7:14]
  input [50:0] io_in_bits_spad_addr, // @[Pipeline.scala:7:14]
  input [19:0] io_in_bits_I, // @[Pipeline.scala:7:14]
  input [17:0] io_in_bits_K, // @[Pipeline.scala:7:14]
  input io_out_ready, // @[Pipeline.scala:7:14]
  output io_out_valid, // @[Pipeline.scala:7:14]
  output [6:0] io_out_bits_cmd_inst_funct, // @[Pipeline.scala:7:14]
  output [4:0] io_out_bits_cmd_inst_rs2, // @[Pipeline.scala:7:14]
  output [4:0] io_out_bits_cmd_inst_rs1, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_inst_xd, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_inst_xs1, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_inst_xs2, // @[Pipeline.scala:7:14]
  output [4:0] io_out_bits_cmd_inst_rd, // @[Pipeline.scala:7:14]
  output [6:0] io_out_bits_cmd_inst_opcode, // @[Pipeline.scala:7:14]
  output [63:0] io_out_bits_cmd_rs1, // @[Pipeline.scala:7:14]
  output [63:0] io_out_bits_cmd_rs2, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_debug, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_cease, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_wfi, // @[Pipeline.scala:7:14]
  output [31:0] io_out_bits_cmd_status_isa, // @[Pipeline.scala:7:14]
  output [1:0] io_out_bits_cmd_status_dprv, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_dv, // @[Pipeline.scala:7:14]
  output [1:0] io_out_bits_cmd_status_prv, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_v, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_sd, // @[Pipeline.scala:7:14]
  output [22:0] io_out_bits_cmd_status_zero2, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_mpv, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_gva, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_mbe, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_sbe, // @[Pipeline.scala:7:14]
  output [1:0] io_out_bits_cmd_status_sxl, // @[Pipeline.scala:7:14]
  output [1:0] io_out_bits_cmd_status_uxl, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_sd_rv32, // @[Pipeline.scala:7:14]
  output [7:0] io_out_bits_cmd_status_zero1, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_tsr, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_tw, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_tvm, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_mxr, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_sum, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_mprv, // @[Pipeline.scala:7:14]
  output [1:0] io_out_bits_cmd_status_xs, // @[Pipeline.scala:7:14]
  output [1:0] io_out_bits_cmd_status_fs, // @[Pipeline.scala:7:14]
  output [1:0] io_out_bits_cmd_status_mpp, // @[Pipeline.scala:7:14]
  output [1:0] io_out_bits_cmd_status_vs, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_spp, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_mpie, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_ube, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_spie, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_upie, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_mie, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_hie, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_sie, // @[Pipeline.scala:7:14]
  output io_out_bits_cmd_status_uie, // @[Pipeline.scala:7:14]
  output [67:0] io_out_bits_dram_addr, // @[Pipeline.scala:7:14]
  output [50:0] io_out_bits_spad_addr, // @[Pipeline.scala:7:14]
  output [19:0] io_out_bits_I, // @[Pipeline.scala:7:14]
  output [17:0] io_out_bits_K, // @[Pipeline.scala:7:14]
  output io_busy // @[Pipeline.scala:7:14]
);

  // Two-stage flow-through pipeline for a command bundle: stage registers
  // advance whenever the stage ahead is not stalling.
  wire io_in_valid_0 = io_in_valid; // @[Pipeline.scala:6:7]
  wire [6:0] io_in_bits_cmd_inst_funct_0 = io_in_bits_cmd_inst_funct; // @[Pipeline.scala:6:7]
  wire [63:0] io_in_bits_cmd_rs1_0 = io_in_bits_cmd_rs1; // @[Pipeline.scala:6:7]
  wire [63:0] io_in_bits_cmd_rs2_0 = io_in_bits_cmd_rs2; // @[Pipeline.scala:6:7]
  wire [67:0] io_in_bits_dram_addr_0 = io_in_bits_dram_addr; // @[Pipeline.scala:6:7]
  wire [50:0] io_in_bits_spad_addr_0 = io_in_bits_spad_addr; // @[Pipeline.scala:6:7]
  wire [19:0] io_in_bits_I_0 = io_in_bits_I; // @[Pipeline.scala:6:7]
  wire [17:0] io_in_bits_K_0 = io_in_bits_K; // @[Pipeline.scala:6:7]
  wire io_out_ready_0 = io_out_ready; // @[Pipeline.scala:6:7]
  wire [4:0] io_in_bits_cmd_inst_rs2 = 5'h0; // @[Pipeline.scala:6:7, :7:14]
  wire [4:0] io_in_bits_cmd_inst_rs1 = 5'h0; // @[Pipeline.scala:6:7, :7:14]
  wire [4:0] io_in_bits_cmd_inst_rd = 5'h0; // @[Pipeline.scala:6:7, :7:14]
  wire [6:0] io_in_bits_cmd_inst_opcode = 7'h0; // @[Pipeline.scala:6:7, :7:14]
  wire [31:0] io_in_bits_cmd_status_isa = 32'h0; // @[Pipeline.scala:6:7, :7:14]
  wire [22:0] io_in_bits_cmd_status_zero2 = 23'h0; // @[Pipeline.scala:6:7, :7:14]
  wire [7:0] io_in_bits_cmd_status_zero1 = 8'h0; // @[Pipeline.scala:6:7, :7:14]
  wire [1:0] io_in_bits_cmd_status_dprv = 2'h0; // @[Pipeline.scala:6:7, :7:14]
  wire [1:0] io_in_bits_cmd_status_prv = 2'h0; // @[Pipeline.scala:6:7, :7:14]
  wire [1:0] io_in_bits_cmd_status_sxl = 2'h0; // @[Pipeline.scala:6:7, :7:14]
  wire [1:0] io_in_bits_cmd_status_uxl = 2'h0; // @[Pipeline.scala:6:7, :7:14]
  wire [1:0] io_in_bits_cmd_status_xs = 2'h0; // @[Pipeline.scala:6:7, :7:14]
  wire [1:0] io_in_bits_cmd_status_fs = 2'h0; // @[Pipeline.scala:6:7, :7:14]
  wire [1:0] io_in_bits_cmd_status_mpp = 2'h0; // @[Pipeline.scala:6:7, :7:14]
  wire [1:0] io_in_bits_cmd_status_vs = 2'h0; // @[Pipeline.scala:6:7, :7:14]
  wire io_in_bits_cmd_inst_xd = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_inst_xs1 = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_inst_xs2 = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_debug = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_cease = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_wfi = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_dv = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_v = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_sd = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_mpv = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_gva = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_mbe = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_sbe = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_sd_rv32 = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_tsr = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_tw = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_tvm = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_mxr = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_sum = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_mprv = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_spp = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_mpie = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_ube = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_spie = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_upie = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_mie = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_hie = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_sie = 1'h0; // @[Pipeline.scala:6:7]
  wire io_in_bits_cmd_status_uie = 1'h0; // @[Pipeline.scala:6:7]
  wire _valids_WIRE_0 = 1'h0; // @[Pipeline.scala:22:33]
  wire _valids_WIRE_1 = 1'h0; // @[Pipeline.scala:22:33]
  wire _io_in_ready_T; // @[Pipeline.scala:27:20]
  wire _io_busy_T_1; // @[Pipeline.scala:24:28]
  wire io_in_ready_0; // @[Pipeline.scala:6:7]
  wire [6:0] io_out_bits_cmd_inst_funct_0; // @[Pipeline.scala:6:7]
  wire [4:0] io_out_bits_cmd_inst_rs2_0; // @[Pipeline.scala:6:7]
  wire [4:0] io_out_bits_cmd_inst_rs1_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_inst_xd_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_inst_xs1_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_inst_xs2_0; // @[Pipeline.scala:6:7]
  wire [4:0] io_out_bits_cmd_inst_rd_0; // @[Pipeline.scala:6:7]
  wire [6:0] io_out_bits_cmd_inst_opcode_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_debug_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_cease_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_wfi_0; // @[Pipeline.scala:6:7]
  wire [31:0] io_out_bits_cmd_status_isa_0; // @[Pipeline.scala:6:7]
  wire [1:0] io_out_bits_cmd_status_dprv_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_dv_0; // @[Pipeline.scala:6:7]
  wire [1:0] io_out_bits_cmd_status_prv_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_v_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_sd_0; // @[Pipeline.scala:6:7]
  wire [22:0] io_out_bits_cmd_status_zero2_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_mpv_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_gva_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_mbe_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_sbe_0; // @[Pipeline.scala:6:7]
  wire [1:0] io_out_bits_cmd_status_sxl_0; // @[Pipeline.scala:6:7]
  wire [1:0] io_out_bits_cmd_status_uxl_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_sd_rv32_0; // @[Pipeline.scala:6:7]
  wire [7:0] io_out_bits_cmd_status_zero1_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_tsr_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_tw_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_tvm_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_mxr_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_sum_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_mprv_0; // @[Pipeline.scala:6:7]
  wire [1:0] io_out_bits_cmd_status_xs_0; // @[Pipeline.scala:6:7]
  wire [1:0] io_out_bits_cmd_status_fs_0; // @[Pipeline.scala:6:7]
  wire [1:0] io_out_bits_cmd_status_mpp_0; // @[Pipeline.scala:6:7]
  wire [1:0] io_out_bits_cmd_status_vs_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_spp_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_mpie_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_ube_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_spie_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_upie_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_mie_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_hie_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_sie_0; // @[Pipeline.scala:6:7]
  wire io_out_bits_cmd_status_uie_0; // @[Pipeline.scala:6:7]
  wire [63:0] io_out_bits_cmd_rs1_0; // @[Pipeline.scala:6:7]
  wire [63:0] io_out_bits_cmd_rs2_0; // @[Pipeline.scala:6:7]
  wire [67:0] io_out_bits_dram_addr_0; // @[Pipeline.scala:6:7]
  wire [50:0] io_out_bits_spad_addr_0; // @[Pipeline.scala:6:7]
  wire [19:0] io_out_bits_I_0; // @[Pipeline.scala:6:7]
  wire [17:0] io_out_bits_K_0; // @[Pipeline.scala:6:7]
  wire io_out_valid_0; // @[Pipeline.scala:6:7]
  wire io_busy_0; // @[Pipeline.scala:6:7]
  reg [6:0] stages_0_cmd_inst_funct; // @[Pipeline.scala:21:21]
  reg [63:0] stages_0_cmd_rs1; // @[Pipeline.scala:21:21]
  reg [63:0] stages_0_cmd_rs2; // @[Pipeline.scala:21:21]
  reg [67:0] stages_0_dram_addr; // @[Pipeline.scala:21:21]
  reg [50:0] stages_0_spad_addr; // @[Pipeline.scala:21:21]
  reg [19:0] stages_0_I; // @[Pipeline.scala:21:21]
  reg [17:0] stages_0_K; // @[Pipeline.scala:21:21]
  reg [6:0] stages_1_cmd_inst_funct; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_inst_funct_0 = stages_1_cmd_inst_funct; // @[Pipeline.scala:6:7, :21:21]
  reg [4:0] stages_1_cmd_inst_rs2; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_inst_rs2_0 = stages_1_cmd_inst_rs2; // @[Pipeline.scala:6:7, :21:21]
  reg [4:0] stages_1_cmd_inst_rs1; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_inst_rs1_0 = stages_1_cmd_inst_rs1; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_inst_xd; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_inst_xd_0 = stages_1_cmd_inst_xd; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_inst_xs1; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_inst_xs1_0 = stages_1_cmd_inst_xs1; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_inst_xs2; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_inst_xs2_0 = stages_1_cmd_inst_xs2; // @[Pipeline.scala:6:7, :21:21]
  reg [4:0] stages_1_cmd_inst_rd; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_inst_rd_0 = stages_1_cmd_inst_rd; // @[Pipeline.scala:6:7, :21:21]
  reg [6:0] stages_1_cmd_inst_opcode; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_inst_opcode_0 = stages_1_cmd_inst_opcode; // @[Pipeline.scala:6:7, :21:21]
  reg [63:0] stages_1_cmd_rs1; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_rs1_0 = stages_1_cmd_rs1; // @[Pipeline.scala:6:7, :21:21]
  reg [63:0] stages_1_cmd_rs2; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_rs2_0 = stages_1_cmd_rs2; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_debug; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_debug_0 = stages_1_cmd_status_debug; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_cease; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_cease_0 = stages_1_cmd_status_cease; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_wfi; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_wfi_0 = stages_1_cmd_status_wfi; // @[Pipeline.scala:6:7, :21:21]
  reg [31:0] stages_1_cmd_status_isa; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_isa_0 = stages_1_cmd_status_isa; // @[Pipeline.scala:6:7, :21:21]
  reg [1:0] stages_1_cmd_status_dprv; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_dprv_0 = stages_1_cmd_status_dprv; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_dv; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_dv_0 = stages_1_cmd_status_dv; // @[Pipeline.scala:6:7, :21:21]
  reg [1:0] stages_1_cmd_status_prv; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_prv_0 = stages_1_cmd_status_prv; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_v; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_v_0 = stages_1_cmd_status_v; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_sd; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_sd_0 = stages_1_cmd_status_sd; // @[Pipeline.scala:6:7, :21:21]
  reg [22:0] stages_1_cmd_status_zero2; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_zero2_0 = stages_1_cmd_status_zero2; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_mpv; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_mpv_0 = stages_1_cmd_status_mpv; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_gva; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_gva_0 = stages_1_cmd_status_gva; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_mbe; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_mbe_0 = stages_1_cmd_status_mbe; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_sbe; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_sbe_0 = stages_1_cmd_status_sbe; // @[Pipeline.scala:6:7, :21:21]
  reg [1:0] stages_1_cmd_status_sxl; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_sxl_0 = stages_1_cmd_status_sxl; // @[Pipeline.scala:6:7, :21:21]
  reg [1:0] stages_1_cmd_status_uxl; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_uxl_0 = stages_1_cmd_status_uxl; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_sd_rv32; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_sd_rv32_0 = stages_1_cmd_status_sd_rv32; // @[Pipeline.scala:6:7, :21:21]
  reg [7:0] stages_1_cmd_status_zero1; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_zero1_0 = stages_1_cmd_status_zero1; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_tsr; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_tsr_0 = stages_1_cmd_status_tsr; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_tw; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_tw_0 = stages_1_cmd_status_tw; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_tvm; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_tvm_0 = stages_1_cmd_status_tvm; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_mxr; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_mxr_0 = stages_1_cmd_status_mxr; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_sum; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_sum_0 = stages_1_cmd_status_sum; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_mprv; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_mprv_0 = stages_1_cmd_status_mprv; // @[Pipeline.scala:6:7, :21:21]
  reg [1:0] stages_1_cmd_status_xs; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_xs_0 = stages_1_cmd_status_xs; // @[Pipeline.scala:6:7, :21:21]
  reg [1:0] stages_1_cmd_status_fs; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_fs_0 = stages_1_cmd_status_fs; // @[Pipeline.scala:6:7, :21:21]
  reg [1:0] stages_1_cmd_status_mpp; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_mpp_0 = stages_1_cmd_status_mpp; // @[Pipeline.scala:6:7, :21:21]
  reg [1:0] stages_1_cmd_status_vs; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_vs_0 = stages_1_cmd_status_vs; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_spp; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_spp_0 = stages_1_cmd_status_spp; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_mpie; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_mpie_0 = stages_1_cmd_status_mpie; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_ube; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_ube_0 = stages_1_cmd_status_ube; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_spie; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_spie_0 = stages_1_cmd_status_spie; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_upie; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_upie_0 = stages_1_cmd_status_upie; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_mie; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_mie_0 = stages_1_cmd_status_mie; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_hie; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_hie_0 = stages_1_cmd_status_hie; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_sie; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_sie_0 = stages_1_cmd_status_sie; // @[Pipeline.scala:6:7, :21:21]
  reg stages_1_cmd_status_uie; // @[Pipeline.scala:21:21]
  assign io_out_bits_cmd_status_uie_0 = stages_1_cmd_status_uie; // @[Pipeline.scala:6:7, :21:21]
  reg [67:0] stages_1_dram_addr; // @[Pipeline.scala:21:21]
  assign io_out_bits_dram_addr_0 = stages_1_dram_addr; // @[Pipeline.scala:6:7, :21:21]
  reg [50:0] stages_1_spad_addr; // @[Pipeline.scala:21:21]
  assign io_out_bits_spad_addr_0 = stages_1_spad_addr; // @[Pipeline.scala:6:7, :21:21]
  reg [19:0] stages_1_I; // @[Pipeline.scala:21:21]
  assign io_out_bits_I_0 = stages_1_I; // @[Pipeline.scala:6:7, :21:21]
  reg [17:0] stages_1_K; // @[Pipeline.scala:21:21]
  assign io_out_bits_K_0 = stages_1_K; // @[Pipeline.scala:6:7, :21:21]
  reg valids_0; // @[Pipeline.scala:22:25]
  reg valids_1; // @[Pipeline.scala:22:25]
  assign io_out_valid_0 = valids_1; // @[Pipeline.scala:6:7, :22:25]
  wire _stalling_0_T; // @[Pipeline.scala:30:16]
  wire _stalling_1_T_1; // @[Pipeline.scala:28:34]
  wire stalling_0; // @[Pipeline.scala:23:27]
  wire stalling_1; // @[Pipeline.scala:23:27]
  wire _io_busy_T = valids_0 | valids_1; // @[Pipeline.scala:22:25, :24:46]
  assign _io_busy_T_1 = io_in_valid_0 | _io_busy_T; // @[Pipeline.scala:6:7, :24:{28,46}]
  assign io_busy_0 = _io_busy_T_1; // @[Pipeline.scala:6:7, :24:28]
  assign _io_in_ready_T = ~stalling_0; // @[Pipeline.scala:23:27, :27:20]
  assign io_in_ready_0 = _io_in_ready_T; // @[Pipeline.scala:6:7, :27:20]
  wire _stalling_1_T = ~io_out_ready_0; // @[Pipeline.scala:6:7, :28:37]
  assign _stalling_1_T_1 = valids_1 & _stalling_1_T; // @[Pipeline.scala:22:25, :28:{34,37}]
  assign stalling_1 = _stalling_1_T_1; // @[Pipeline.scala:23:27, :28:34]
  assign _stalling_0_T = valids_0 & stalling_1; // @[Pipeline.scala:22:25, :23:27, :30:16]
  assign stalling_0 = _stalling_0_T; // @[Pipeline.scala:23:27, :30:16]
  wire _T_2 = io_in_ready_0 & io_in_valid_0; // @[Decoupled.scala:51:35]
  always @(posedge clock) begin // @[Pipeline.scala:6:7]
    if (_T_2) begin // @[Decoupled.scala:51:35]
      stages_0_cmd_inst_funct <= io_in_bits_cmd_inst_funct_0; // @[Pipeline.scala:6:7, :21:21]
      stages_0_cmd_rs1 <= io_in_bits_cmd_rs1_0; // @[Pipeline.scala:6:7, :21:21]
      stages_0_cmd_rs2 <= io_in_bits_cmd_rs2_0; // @[Pipeline.scala:6:7, :21:21]
      stages_0_dram_addr <= io_in_bits_dram_addr_0; // @[Pipeline.scala:6:7, :21:21]
      stages_0_spad_addr <= io_in_bits_spad_addr_0; // @[Pipeline.scala:6:7, :21:21]
      stages_0_I <= io_in_bits_I_0; // @[Pipeline.scala:6:7, :21:21]
      stages_0_K <= io_in_bits_K_0; // @[Pipeline.scala:6:7, :21:21]
    end
    if (stalling_1) begin // @[Pipeline.scala:23:27]
    end
    else begin // @[Pipeline.scala:23:27]
      stages_1_cmd_inst_funct <= stages_0_cmd_inst_funct; // @[Pipeline.scala:21:21]
      stages_1_cmd_inst_rs2 <= 5'h0; // @[Pipeline.scala:6:7, :7:14, :21:21]
      stages_1_cmd_inst_rs1 <= 5'h0; // @[Pipeline.scala:6:7, :7:14, :21:21]
      stages_1_cmd_inst_rd <= 5'h0; // @[Pipeline.scala:6:7, :7:14, :21:21]
      stages_1_cmd_inst_opcode <= 7'h0; // @[Pipeline.scala:6:7, :7:14, :21:21]
      stages_1_cmd_rs1 <= stages_0_cmd_rs1; // @[Pipeline.scala:21:21]
      stages_1_cmd_rs2 <= stages_0_cmd_rs2; // @[Pipeline.scala:21:21]
      stages_1_cmd_status_isa <= 32'h0; // @[Pipeline.scala:6:7, :7:14, :21:21]
      stages_1_cmd_status_dprv <= 2'h0; // @[Pipeline.scala:6:7, :7:14, :21:21]
      stages_1_cmd_status_prv <= 2'h0; // @[Pipeline.scala:6:7, :7:14, :21:21]
      stages_1_cmd_status_zero2 <= 23'h0; // @[Pipeline.scala:6:7, :7:14, :21:21]
      stages_1_cmd_status_sxl <= 2'h0; // @[Pipeline.scala:6:7, :7:14, :21:21]
      stages_1_cmd_status_uxl <= 2'h0; // @[Pipeline.scala:6:7, :7:14, :21:21]
      stages_1_cmd_status_zero1 <= 8'h0; // @[Pipeline.scala:6:7, :7:14, :21:21]
      stages_1_cmd_status_xs <= 2'h0; // @[Pipeline.scala:6:7, :7:14, :21:21]
      stages_1_cmd_status_fs <= 2'h0; // @[Pipeline.scala:6:7, :7:14, :21:21]
      stages_1_cmd_status_mpp <= 2'h0; // @[Pipeline.scala:6:7, :7:14, :21:21]
      stages_1_cmd_status_vs <= 2'h0; // @[Pipeline.scala:6:7, :7:14, :21:21]
      stages_1_dram_addr <= stages_0_dram_addr; // @[Pipeline.scala:21:21]
      stages_1_spad_addr <= stages_0_spad_addr; // @[Pipeline.scala:21:21]
      stages_1_I <= stages_0_I; // @[Pipeline.scala:21:21]
      stages_1_K <= stages_0_K; // @[Pipeline.scala:21:21]
    end
    stages_1_cmd_inst_xd <= stalling_1 & stages_1_cmd_inst_xd; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_inst_xs1 <= stalling_1 & stages_1_cmd_inst_xs1; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_inst_xs2 <= stalling_1 & stages_1_cmd_inst_xs2; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_debug <= stalling_1 & stages_1_cmd_status_debug; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_cease <= stalling_1 & stages_1_cmd_status_cease; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_wfi <= stalling_1 & stages_1_cmd_status_wfi; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_dv <= stalling_1 & stages_1_cmd_status_dv; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_v <= stalling_1 & stages_1_cmd_status_v; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_sd <= stalling_1 & stages_1_cmd_status_sd; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_mpv <= stalling_1 & stages_1_cmd_status_mpv; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_gva <= stalling_1 & stages_1_cmd_status_gva; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_mbe <= stalling_1 & stages_1_cmd_status_mbe; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_sbe <= stalling_1 & stages_1_cmd_status_sbe; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_sd_rv32 <= stalling_1 & stages_1_cmd_status_sd_rv32; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_tsr <= stalling_1 & stages_1_cmd_status_tsr; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_tw <= stalling_1 & stages_1_cmd_status_tw; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_tvm <= stalling_1 & stages_1_cmd_status_tvm; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_mxr <= stalling_1 & stages_1_cmd_status_mxr; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_sum <= stalling_1 & stages_1_cmd_status_sum; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_mprv <= stalling_1 & stages_1_cmd_status_mprv; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_spp <= stalling_1 & stages_1_cmd_status_spp; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_mpie <= stalling_1 & stages_1_cmd_status_mpie; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_ube <= stalling_1 & stages_1_cmd_status_ube; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_spie <= stalling_1 & stages_1_cmd_status_spie; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_upie <= stalling_1 & stages_1_cmd_status_upie; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_mie <= stalling_1 & stages_1_cmd_status_mie; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_hie <= stalling_1 & stages_1_cmd_status_hie; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_sie <= stalling_1 & stages_1_cmd_status_sie; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    stages_1_cmd_status_uie <= stalling_1 & stages_1_cmd_status_uie; // @[Pipeline.scala:21:21, :23:27, :60:17, :61:13]
    if (reset) begin // @[Pipeline.scala:6:7]
      valids_0 <= 1'h0; // @[Pipeline.scala:22:25]
      valids_1 <= 1'h0; // @[Pipeline.scala:22:25]
    end
    else begin // @[Pipeline.scala:6:7]
      valids_0 <= _T_2 | stalling_1 & valids_0; // @[Decoupled.scala:51:35]
      valids_1 <= valids_0 | ~io_out_ready_0 & valids_1; // @[Pipeline.scala:6:7, :22:25, :36:24, :37:19, :49:16, :50:12]
    end
  end // always @(posedge)
  assign io_in_ready = io_in_ready_0; // @[Pipeline.scala:6:7]
  assign io_out_valid = io_out_valid_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_inst_funct = io_out_bits_cmd_inst_funct_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_inst_rs2 = io_out_bits_cmd_inst_rs2_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_inst_rs1 = io_out_bits_cmd_inst_rs1_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_inst_xd = io_out_bits_cmd_inst_xd_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_inst_xs1 = io_out_bits_cmd_inst_xs1_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_inst_xs2 = io_out_bits_cmd_inst_xs2_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_inst_rd = io_out_bits_cmd_inst_rd_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_inst_opcode = io_out_bits_cmd_inst_opcode_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_rs1 = io_out_bits_cmd_rs1_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_rs2 = io_out_bits_cmd_rs2_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_debug = io_out_bits_cmd_status_debug_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_cease = io_out_bits_cmd_status_cease_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_wfi = io_out_bits_cmd_status_wfi_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_isa = io_out_bits_cmd_status_isa_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_dprv = io_out_bits_cmd_status_dprv_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_dv = io_out_bits_cmd_status_dv_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_prv = io_out_bits_cmd_status_prv_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_v = io_out_bits_cmd_status_v_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_sd = io_out_bits_cmd_status_sd_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_zero2 = io_out_bits_cmd_status_zero2_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_mpv = io_out_bits_cmd_status_mpv_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_gva = io_out_bits_cmd_status_gva_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_mbe = io_out_bits_cmd_status_mbe_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_sbe = io_out_bits_cmd_status_sbe_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_sxl = io_out_bits_cmd_status_sxl_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_uxl = io_out_bits_cmd_status_uxl_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_sd_rv32 = io_out_bits_cmd_status_sd_rv32_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_zero1 = io_out_bits_cmd_status_zero1_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_tsr = io_out_bits_cmd_status_tsr_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_tw = io_out_bits_cmd_status_tw_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_tvm = io_out_bits_cmd_status_tvm_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_mxr = io_out_bits_cmd_status_mxr_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_sum = io_out_bits_cmd_status_sum_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_mprv = io_out_bits_cmd_status_mprv_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_xs = io_out_bits_cmd_status_xs_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_fs = io_out_bits_cmd_status_fs_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_mpp = io_out_bits_cmd_status_mpp_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_vs = io_out_bits_cmd_status_vs_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_spp = io_out_bits_cmd_status_spp_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_mpie = io_out_bits_cmd_status_mpie_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_ube = io_out_bits_cmd_status_ube_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_spie = io_out_bits_cmd_status_spie_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_upie = io_out_bits_cmd_status_upie_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_mie = io_out_bits_cmd_status_mie_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_hie = io_out_bits_cmd_status_hie_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_sie = io_out_bits_cmd_status_sie_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_cmd_status_uie = io_out_bits_cmd_status_uie_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_dram_addr = io_out_bits_dram_addr_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_spad_addr = io_out_bits_spad_addr_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_I = io_out_bits_I_0; // @[Pipeline.scala:6:7]
  assign io_out_bits_K = io_out_bits_K_0; // @[Pipeline.scala:6:7]
  assign io_busy = io_busy_0; // @[Pipeline.scala:6:7]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File ListBuffer.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.util._
/** Static configuration for a ListBuffer.
  *
  * @param gen     payload type stored in each entry
  * @param queues  number of independent linked lists sharing the storage
  * @param entries total number of entries shared among all queues
  * @param bypass  when true, a push is visible on the peek/valid outputs in the same cycle
  */
case class ListBufferParameters[T <: Data](gen: T, queues: Int, entries: Int, bypass: Boolean)
{
  val queueBits = log2Up(queues)  // bits needed to address a queue
  val entryBits = log2Up(entries) // bits needed to address an entry
}
/** Push-port payload: which queue to append to, and the data to store. */
class ListBufferPush[T <: Data](params: ListBufferParameters[T]) extends Bundle
{
  val index = UInt(params.queueBits.W) // destination queue
  val data = Output(params.gen)        // entry payload
}
/** A set of `queues` linked lists dynamically sharing a pool of `entries`
  * storage slots.  Push appends to the tail of the selected queue; pop removes
  * from the head.  When `params.bypass` is set, a push becomes observable on
  * the peek/valid outputs in the same cycle (flow-queue behavior).
  */
class ListBuffer[T <: Data](params: ListBufferParameters[T]) extends Module
{
  override def desiredName = s"ListBuffer_${params.gen.typeName}_q${params.queues}_e${params.entries}"
  val io = IO(new Bundle {
    // push is visible on the same cycle; flow queues
    val push = Flipped(Decoupled(new ListBufferPush(params)))
    val valid = UInt(params.queues.W)
    val pop = Flipped(Valid(UInt(params.queueBits.W)))
    val data = Output(params.gen)
  })

  val valid = RegInit(0.U(params.queues.W)) // per-queue non-empty flags
  val head = Mem(params.queues, UInt(params.entryBits.W)) // index of each queue's first entry
  val tail = Mem(params.queues, UInt(params.entryBits.W)) // index of each queue's last entry
  val used = RegInit(0.U(params.entries.W)) // per-entry allocation bitmap
  val next = Mem(params.entries, UInt(params.entryBits.W)) // linked-list successor pointers
  val data = Mem(params.entries, params.gen) // entry payloads

  // One-hot lowest-index free entry, and its binary index.
  val freeOH = ~(leftOR(~used) << 1) & ~used
  val freeIdx = OHToUInt(freeOH)

  val valid_set = WireDefault(0.U(params.queues.W))
  val valid_clr = WireDefault(0.U(params.queues.W))
  val used_set = WireDefault(0.U(params.entries.W))
  val used_clr = WireDefault(0.U(params.entries.W))

  val push_tail = tail.read(io.push.bits.index)
  val push_valid = valid(io.push.bits.index)

  // Accept a push while any entry remains free.
  io.push.ready := !used.andR
  when (io.push.fire) {
    valid_set := UIntToOH(io.push.bits.index, params.queues)
    used_set := freeOH
    data.write(freeIdx, io.push.bits.data)
    when (push_valid) {
      next.write(push_tail, freeIdx) // append to an existing list
    } .otherwise {
      head.write(io.push.bits.index, freeIdx) // start a new list
    }
    tail.write(io.push.bits.index, freeIdx)
  }

  val pop_head = head.read(io.pop.bits)
  val pop_valid = valid(io.pop.bits)

  // Bypass push data to the peek port
  io.data := (if (!params.bypass) data.read(pop_head) else Mux(!pop_valid, io.push.bits.data, data.read(pop_head)))
  io.valid := (if (!params.bypass) valid else (valid | valid_set))

  // It is an error to pop something that is not valid
  assert (!io.pop.fire || (io.valid)(io.pop.bits))

  when (io.pop.fire) {
    used_clr := UIntToOH(pop_head, params.entries)
    when (pop_head === tail.read(io.pop.bits)) {
      valid_clr := UIntToOH(io.pop.bits, params.queues) // popped the last entry of the list
    }
    // If this cycle's push appends directly behind the popped head, the new
    // head pointer must come from the just-allocated slot, not from `next`.
    head.write(io.pop.bits, Mux(io.push.fire && push_valid && push_tail === pop_head, freeIdx, next.read(pop_head)))
  }

  // Empty bypass changes no state
  when ((!params.bypass).B || !io.pop.valid || pop_valid) {
    used := (used & ~used_clr) | used_set
    valid := (valid & ~valid_clr) | valid_set
  }
}
| module data_28x44( // @[ListBuffer.scala:52:18]
  input  [4:0]  R0_addr,
  input         R0_en,
  input         R0_clk,
  output [43:0] R0_data,
  input  [4:0]  W0_addr,
  input         W0_en,
  input         W0_clk,
  input  [43:0] W0_data
);

  // 28-entry x 44-bit register array backing the ListBuffer data store.
  reg [43:0] Memory[0:27]; // @[ListBuffer.scala:52:18]

  // Write port: synchronous write on W0_clk when enabled.
  // Bug fix: the original block was missing its `end` -- the stray
  // `always @(posedge)` line was the remnant of `end // always @(posedge)`.
  always @(posedge W0_clk) begin // @[ListBuffer.scala:52:18]
    if (W0_en & 1'h1) // @[ListBuffer.scala:52:18]
      Memory[W0_addr] <= W0_data; // @[ListBuffer.scala:52:18]
  end // always @(posedge)

  // Read port: combinational read, X when the read enable is low.
  assign R0_data = R0_en ? Memory[R0_addr] : 44'bx; // @[ListBuffer.scala:52:18]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File JtagShifter.scala:
// See LICENSE.jtag for license details.
package freechips.rocketchip.jtag
import chisel3._
import chisel3.reflect.DataMirror
import chisel3.internal.firrtl.KnownWidth
import chisel3.util.{Cat, Valid}
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util.property
/** Base JTAG shifter IO, viewed from input to shift register chain.
  * Can be chained together.
  */
class ShifterIO extends Bundle {
  val shift = Bool() // advance the scan chain on clock high
  val data = Bool() // as input: bit to be captured into shifter MSB on next rising edge; as output: value of shifter LSB
  val capture = Bool() // high in the CaptureIR/DR state when this chain is selected
  val update = Bool() // high in the UpdateIR/DR state when this chain is selected

  /** Sets a output shifter IO's control signals from a input shifter IO's control signals.
    * Note: `data` is deliberately not forwarded; each stage drives its own.
    */
  def chainControlFrom(in: ShifterIO): Unit = {
    shift := in.shift
    capture := in.capture
    update := in.update
  }
}
/** IO common to every chain stage: control/data in from the previous stage,
  * out to the next one.
  */
trait ChainIO extends Bundle {
  val chainIn = Input(new ShifterIO)
  val chainOut = Output(new ShifterIO)
}
/** Parallel-capture port: `bits` is sampled into the shifter on the rising
  * edge following a single-cycle `capture` pulse.
  */
class Capture[+T <: Data](gen: T) extends Bundle {
  val bits = Input(gen) // data to capture, should be always valid
  val capture = Output(Bool()) // will be high in capture state (single cycle), captured on following rising edge
}

object Capture {
  def apply[T <: Data](gen: T): Capture[T] = new Capture(gen)
}
/** Trait that all JTAG chains (data and instruction registers) must extend, providing basic chain
  * IO.
  */
trait Chain extends Module {
  val io: ChainIO // concrete chains refine ChainIO with capture/update ports as needed
}
/** One-element shift register, data register for bypass mode.
  *
  * Implements Clause 10.
  */
class JtagBypassChain(implicit val p: Parameters) extends Chain {
  class ModIO extends ChainIO
  val io = IO(new ModIO)
  io.chainOut chainControlFrom io.chainIn

  val reg = Reg(Bool()) // 10.1.1a single shift register stage
  io.chainOut.data := reg

  property.cover(io.chainIn.capture, "bypass_chain_capture", "JTAG; bypass_chain_capture; This Bypass Chain captured data")

  when (io.chainIn.capture) {
    reg := false.B // 10.1.1b capture logic 0 on TCK rising
  } .elsewhen (io.chainIn.shift) {
    reg := io.chainIn.data
  }

  // capture / update / shift are mutually exclusive TAP states
  assert(!(io.chainIn.capture && io.chainIn.update)
    && !(io.chainIn.capture && io.chainIn.shift)
    && !(io.chainIn.update && io.chainIn.shift))
}

object JtagBypassChain {
  def apply()(implicit p: Parameters) = new JtagBypassChain
}
/** Simple shift register with parallel capture only, for read-only data registers.
  *
  * Number of stages is the number of bits in gen, which must have a known width.
  *
  * Useful notes:
  * 7.2.1c shifter shifts on TCK rising edge
  * 4.3.2a TDI captured on TCK rising edge, 6.1.2.1b assumed changes on TCK falling edge
  */
class CaptureChain[+T <: Data](gen: T)(implicit val p: Parameters) extends Chain {
  override def desiredName = s"CaptureChain_${gen.typeName}"
  class ModIO extends ChainIO {
    val capture = Capture(gen)
  }
  val io = IO(new ModIO)
  io.chainOut chainControlFrom io.chainIn

  val n = DataMirror.widthOf(gen) match {
    case KnownWidth(x) => x
    case _ => require(false, s"can't generate chain for unknown width data type $gen"); -1 // TODO: remove -1 type hack
  }

  val regs = (0 until n) map (x => Reg(Bool())) // regs(0) is the LSB / scan-out end
  io.chainOut.data := regs(0)

  property.cover(io.chainIn.capture, "chain_capture", "JTAG; chain_capture; This Chain captured data")

  when (io.chainIn.capture) {
    (0 until n) map (x => regs(x) := io.capture.bits.asUInt(x)) // parallel load
    io.capture.capture := true.B
  } .elsewhen (io.chainIn.shift) {
    regs(n-1) := io.chainIn.data // TDI enters at the MSB
    (0 until n-1) map (x => regs(x) := regs(x+1))
    io.capture.capture := false.B
  } .otherwise {
    io.capture.capture := false.B
  }

  // capture / update / shift are mutually exclusive TAP states
  assert(!(io.chainIn.capture && io.chainIn.update)
    && !(io.chainIn.capture && io.chainIn.shift)
    && !(io.chainIn.update && io.chainIn.shift))
}

object CaptureChain {
  def apply[T <: Data](gen: T)(implicit p: Parameters) = new CaptureChain(gen)
}
/** Simple shift register with parallel capture and update. Useful for general instruction and data
  * scan registers.
  *
  * Number of stages is the max number of bits in genCapture and genUpdate, both of which must have
  * known widths. If there is a width mismatch, the unused most significant bits will be zero.
  *
  * Useful notes:
  * 7.2.1c shifter shifts on TCK rising edge
  * 4.3.2a TDI captured on TCK rising edge, 6.1.2.1b assumed changes on TCK falling edge
  */
class CaptureUpdateChain[+T <: Data, +V <: Data](genCapture: T, genUpdate: V)(implicit val p: Parameters) extends Chain {
  override def desiredName = s"CaptureUpdateChain_${genCapture.typeName}_To_${genUpdate.typeName}"
  class ModIO extends ChainIO {
    val capture = Capture(genCapture)
    val update = Valid(genUpdate) // valid high when in update state (single cycle), contents may change any time after
  }
  val io = IO(new ModIO)
  io.chainOut chainControlFrom io.chainIn

  val captureWidth = DataMirror.widthOf(genCapture) match {
    case KnownWidth(x) => x
    case _ => require(false, s"can't generate chain for unknown width data type $genCapture"); -1 // TODO: remove -1 type hack
  }
  val updateWidth = DataMirror.widthOf(genUpdate) match {
    case KnownWidth(x) => x
    case _ => require(false, s"can't generate chain for unknown width data type $genUpdate"); -1 // TODO: remove -1 type hack
  }
  val n = math.max(captureWidth, updateWidth) // shifter covers the wider of the two

  val regs = (0 until n) map (x => Reg(Bool())) // regs(0) is the LSB / scan-out end
  io.chainOut.data := regs(0)

  // Low updateWidth bits of the shifter, re-viewed as the update type.
  val updateBits = Cat(regs.reverse)(updateWidth-1, 0)
  io.update.bits := updateBits.asTypeOf(io.update.bits)

  val captureBits = io.capture.bits.asUInt

  property.cover(io.chainIn.capture, "chain_capture", "JTAG;chain_capture; This Chain captured data")
  property.cover(io.chainIn.capture, "chain_update", "JTAG;chain_update; This Chain updated data")

  when (io.chainIn.capture) {
    (0 until math.min(n, captureWidth)) map (x => regs(x) := captureBits(x))
    (captureWidth until n) map (x => regs(x) := 0.U) // zero stages beyond the capture width
    io.capture.capture := true.B
    io.update.valid := false.B
  } .elsewhen (io.chainIn.update) {
    io.capture.capture := false.B
    io.update.valid := true.B
  } .elsewhen (io.chainIn.shift) {
    regs(n-1) := io.chainIn.data // TDI enters at the MSB
    (0 until n-1) map (x => regs(x) := regs(x+1))
    io.capture.capture := false.B
    io.update.valid := false.B
  } .otherwise {
    io.capture.capture := false.B
    io.update.valid := false.B
  }

  // capture / update / shift are mutually exclusive TAP states
  assert(!(io.chainIn.capture && io.chainIn.update)
    && !(io.chainIn.capture && io.chainIn.shift)
    && !(io.chainIn.update && io.chainIn.shift))
}

object CaptureUpdateChain {
  /** Capture-update chain with matching capture and update types.
    */
  def apply[T <: Data](gen: T)(implicit p: Parameters) = new CaptureUpdateChain(gen, gen)
  def apply[T <: Data, V <: Data](genCapture: T, genUpdate: V)(implicit p: Parameters) =
    new CaptureUpdateChain(genCapture, genUpdate)
}
| module CaptureUpdateChain_DTMInfo_To_DTMInfo( // @[JtagShifter.scala:137:7]
input clock, // @[JtagShifter.scala:137:7]
input reset, // @[JtagShifter.scala:137:7]
input io_chainIn_shift, // @[JtagShifter.scala:143:14]
input io_chainIn_data, // @[JtagShifter.scala:143:14]
input io_chainIn_capture, // @[JtagShifter.scala:143:14]
input io_chainIn_update, // @[JtagShifter.scala:143:14]
output io_chainOut_data, // @[JtagShifter.scala:143:14]
input [1:0] io_capture_bits_dmiStatus, // @[JtagShifter.scala:143:14]
output io_update_valid, // @[JtagShifter.scala:143:14]
output io_update_bits_dmireset // @[JtagShifter.scala:143:14]
);
reg regs_0; // @[JtagShifter.scala:156:39]
reg regs_1; // @[JtagShifter.scala:156:39]
reg regs_2; // @[JtagShifter.scala:156:39]
reg regs_3; // @[JtagShifter.scala:156:39]
reg regs_4; // @[JtagShifter.scala:156:39]
reg regs_5; // @[JtagShifter.scala:156:39]
reg regs_6; // @[JtagShifter.scala:156:39]
reg regs_7; // @[JtagShifter.scala:156:39]
reg regs_8; // @[JtagShifter.scala:156:39]
reg regs_9; // @[JtagShifter.scala:156:39]
reg regs_10; // @[JtagShifter.scala:156:39]
reg regs_11; // @[JtagShifter.scala:156:39]
reg regs_12; // @[JtagShifter.scala:156:39]
reg regs_13; // @[JtagShifter.scala:156:39]
reg regs_14; // @[JtagShifter.scala:156:39]
reg regs_15; // @[JtagShifter.scala:156:39]
reg regs_16; // @[JtagShifter.scala:156:39]
reg regs_17; // @[JtagShifter.scala:156:39]
reg regs_18; // @[JtagShifter.scala:156:39]
reg regs_19; // @[JtagShifter.scala:156:39]
reg regs_20; // @[JtagShifter.scala:156:39]
reg regs_21; // @[JtagShifter.scala:156:39]
reg regs_22; // @[JtagShifter.scala:156:39]
reg regs_23; // @[JtagShifter.scala:156:39]
reg regs_24; // @[JtagShifter.scala:156:39]
reg regs_25; // @[JtagShifter.scala:156:39]
reg regs_26; // @[JtagShifter.scala:156:39]
reg regs_27; // @[JtagShifter.scala:156:39]
reg regs_28; // @[JtagShifter.scala:156:39]
reg regs_29; // @[JtagShifter.scala:156:39]
reg regs_30; // @[JtagShifter.scala:156:39]
reg regs_31; // @[JtagShifter.scala:156:39] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
  /** N-stage shift register with reset value `init`; each stage is optionally
    * named `<name>_<i>` so it is recognizable in the emitted netlist.
    */
  def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
    (0 until n).foldRight(in) {
      case (i, next) => {
        val r = RegNext(next, init)
        name.foreach { na => r.suggestName(s"${na}_${i}") }
        r
      }
    }
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg,
* but only used for timing applications
*/
/** Base class for the wrapped pipeline/synchronizer register modules: a
  * w-bit-wide black-box-able register stage with d-in/q-out ports.
  */
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(w.W))  // data in
    val q = Output(UInt(w.W)) // data out
  }
  )
}

object AbstractPipelineReg {
  /** Instantiate `gen`, drive it with `in` (as raw bits), and return its
    * output re-viewed as the type of `in`.
    */
  def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
    val chain = Module(gen)
    name.foreach{ chain.suggestName(_) }
    chain.io.d := in.asUInt
    chain.io.q.asTypeOf(in)
  }
}
/** W-wide, depth-deep chain of asynchronously-reset register vectors
  * (see the file-header comment for intended CDC/timing use).
  */
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
  require(depth > 0, "Depth must be greater than 0.")

  override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"

  // One always-enabled async-reset register vector per stage.
  val chain = List.tabulate(depth) { i =>
    Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
  }

  // Data enters at chain.last and exits at chain.head.
  chain.last.io.d := io.d
  chain.last.io.en := true.B

  (chain.init zip chain.tail).foreach { case (sink, source) =>
    sink.io.d := source.io.q
    sink.io.en := true.B
  }
  io.q := chain.head.io.q
}

object AsyncResetShiftReg {
  /** Shift `in` through `depth` async-reset stages; overloads accept the
    * reset value as an Int or as a literal of the same type as `in`.
    */
  def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
    AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)

  def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
    apply(in, depth, 0, name)

  def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
    apply(in, depth, init.litValue.toInt, name)

  def apply [T <: Data](in: T, depth: Int, init: T): T =
    apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
/** Configuration for an asynchronous (dual-clock) queue crossing.
  *
  * @param depth  queue capacity; must be a power of two
  * @param sync   number of synchronizer stages on the gray-coded indices
  * @param safe   include reset-resynchronization handshaking (see below)
  * @param narrow move the read mux to the source side of the crossing (see below)
  */
case class AsyncQueueParams(
  depth: Int = 8,
  sync: Int = 3,
  safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
  narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
  require (depth > 0 && isPow2(depth))
  require (sync >= 2)

  val bits = log2Ceil(depth)
  val wires = if (narrow) 1 else depth // number of data wires crossing the domains
}

object AsyncQueueParams {
  // When there is only one entry, we don't need narrow.
  def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
/** Handshake wires used by the safe variant to resynchronize the crossing
  * when either side is reset.
  */
class AsyncBundleSafety extends Bundle {
  val ridx_valid = Input (Bool())
  val widx_valid = Output(Bool())
  val source_reset_n = Output(Bool())
  val sink_reset_n = Input (Bool())
}

/** The raw wires that cross the clock domains: data, gray-coded indices,
  * the optional narrow-mode index, and the optional safety handshake.
  */
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
  // Data-path synchronization
  val mem = Output(Vec(params.wires, gen))
  val ridx = Input (UInt((params.bits+1).W))
  val widx = Output(UInt((params.bits+1).W))
  val index = params.narrow.option(Input(UInt(params.bits.W)))

  // Signals used to self-stabilize a safe AsyncQueue
  val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
  /** Gray-coded counter: a binary register is incremented (or cleared), and
    * the output is binary ^ (binary >> 1), so only one bit changes per step
    * -- safe to synchronize across clock domains.
    */
  def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
    val incremented = Wire(UInt(bits.W))
    val binary = RegNext(next=incremented, init=0.U).suggestName(name)
    incremented := Mux(clear, 0.U, binary + increment.asUInt)
    incremented ^ (incremented >> 1)
  }
}
/** Synchronizes a level signal into the clock/reset domain supplied through
  * the explicit clock/reset ports, using `sync` register stages.
  */
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
  val io = IO(new Bundle {
    val in = Input(Bool())
    val out = Output(Bool())
  })
  val clock = IO(Input(Clock()))
  val reset = IO(Input(AsyncReset()))
  withClockAndReset(clock, reset){
    io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
  }
}
/** Source (enqueue) half of the asynchronous queue; lives entirely in the
  * producer's clock domain.
  */
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
  override def desiredName = s"AsyncQueueSource_${gen.typeName}"

  val io = IO(new Bundle {
    // These come from the source domain
    val enq = Flipped(Decoupled(gen))
    // These cross to the sink clock domain
    val async = new AsyncBundle(gen, params)
  })

  val bits = params.bits
  val sink_ready = WireInit(true.B) // deasserted (when safe) while the sink side is in reset
  val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
  val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
  val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
  // Full when the gray write index equals the "depth-ahead" transform of the read index.
  val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)

  // Convert the gray widx back to a binary memory index.
  val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
  when (io.enq.fire) { mem(index) := io.enq.bits }

  // Registered ready keeps the enq handshake off the gray-compare path.
  val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
  io.enq.ready := ready_reg && sink_ready

  val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
  io.async.widx := widx_reg

  io.async.index match {
    case Some(index) => io.async.mem(0) := mem(index) // narrow mode: mux on the source side
    case None => io.async.mem := mem
  }

  io.async.safe.foreach { sio =>
    // Reset-safety handshake: synchronize validity in both directions so a
    // reset on either side quiesces the crossing.
    val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
    val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
    val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
    val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
    source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
    source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
    sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
    sink_valid .reset := reset.asAsyncReset

    source_valid_0.clock := clock
    source_valid_1.clock := clock
    sink_extend .clock := clock
    sink_valid .clock := clock

    source_valid_0.io.in := true.B
    source_valid_1.io.in := source_valid_0.io.out
    sio.widx_valid := source_valid_1.io.out
    sink_extend.io.in := sio.ridx_valid
    sink_valid.io.in := sink_extend.io.out
    sink_ready := sink_valid.io.out
    sio.source_reset_n := !reset.asBool

    // Assert that if there is stuff in the queue, then reset cannot happen
    //  Impossible to write because dequeue can occur on the receiving side,
    //  then reset allowed to happen, but write side cannot know that dequeue
    //  occurred.
    // TODO: write some sort of sanity check assertion for users
    // that denote don't reset when there is activity
    // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
    // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
  }
}
/** Sink (dequeue) half of the asynchronous queue; lives entirely in the
  * consumer's clock domain.
  */
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
  override def desiredName = s"AsyncQueueSink_${gen.typeName}"

  val io = IO(new Bundle {
    // These come from the sink domain
    val deq = Decoupled(gen)
    // These cross to the source clock domain
    val async = Flipped(new AsyncBundle(gen, params))
  })

  val bits = params.bits
  val source_ready = WireInit(true.B) // deasserted (when safe) while the source side is in reset
  val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
  val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
  val valid = source_ready && ridx =/= widx // non-empty when the gray indices differ

  // The mux is safe because timing analysis ensures ridx has reached the register
  // On an ASIC, changes to the unread location cannot affect the selected value
  // On an FPGA, only one input changes at a time => mem updates don't cause glitches
  // The register only latches when the selected valued is not being written
  val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
  io.async.index.foreach { _ := index }
  // This register does not NEED to be reset, as its contents will not
  // be considered unless the asynchronously reset deq valid register is set.
  // It is possible that bits latches when the source domain is reset / has power cut
  // This is safe, because isolation gates brought mem low before the zeroed widx reached us
  val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
  io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))

  val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
  io.deq.valid := valid_reg && source_ready

  val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
  io.async.ridx := ridx_reg

  io.async.safe.foreach { sio =>
    // Reset-safety handshake, mirroring the one in AsyncQueueSource.
    val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
    val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
    val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
    val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))

    sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
    sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
    source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
    source_valid .reset := reset.asAsyncReset

    sink_valid_0 .clock := clock
    sink_valid_1 .clock := clock
    source_extend.clock := clock
    source_valid .clock := clock

    sink_valid_0.io.in := true.B
    sink_valid_1.io.in := sink_valid_0.io.out
    sio.ridx_valid := sink_valid_1.io.out
    source_extend.io.in := sio.widx_valid
    source_valid.io.in := source_extend.io.out
    source_ready := source_valid.io.out
    sio.sink_reset_n := !reset.asBool

    // TODO: write some sort of sanity check assertion for users
    // that denote don't reset when there is activity
    //
    // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
    // val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
    // val reset_rise = !reset_and_extend_prev && reset_and_extend
    // val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
    // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
  }
}
object FromAsyncBundle
{
  // Sometimes it makes sense for the sink to have different sync than the source
  def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)

  /** Terminate an AsyncBundle in the current (sink) clock domain. */
  def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
    val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
    sink.io.async <> x
    sink.io.deq
  }
}

object ToAsyncBundle
{
  /** Launch a ReadyValid stream into an AsyncBundle from the current (source) domain. */
  def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
    val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
    source.io.enq <> x
    source.io.async
  }
}
/** Complete dual-clock FIFO: a source half clocked by enq_clock and a sink
  * half clocked by deq_clock, joined by the async bundle.
  */
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
  val io = IO(new CrossingIO(gen))
  val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
  val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }

  source.io.enq <> io.enq
  io.deq <> sink.io.deq
  sink.io.async <> source.io.async
}
| module AsyncValidSync_67( // @[AsyncQueue.scala:58:7]
  input  io_in,  // @[AsyncQueue.scala:59:14]
  output io_out, // @[AsyncQueue.scala:59:14]
  input  clock,  // @[AsyncQueue.scala:63:17]
  input  reset   // @[AsyncQueue.scala:64:17]
);

  // Output of the 3-stage async-reset synchronizer chain.
  wire synced; // @[ShiftReg.scala:48:24]

  // Carry io_in into this clock/reset domain through three register stages.
  AsyncResetSynchronizerShiftReg_w1_d3_i0_67 io_out_source_valid ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (reset),
    .io_d  (io_in),
    .io_q  (synced)
  ); // @[ShiftReg.scala:45:23]

  assign io_out = synced; // @[AsyncQueue.scala:58:7]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
  /** Split an optional pair into a pair of options (both defined or both empty). */
  def unzip: (Option[S], Option[T]) = x match {
    case Some((s, t)) => (Some(s), Some(t))
    case None         => (None, None)
  }
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
  /** True when x equals any element of s (hardware OR-reduction of equalities). */
  def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR

  def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}

implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {

  /** Like Vec.apply(idx), but tolerates indices of mismatched width */
  def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
  /** Hardware mux selecting x(idx); handles non-power-of-2 sizes and over-wide indices. */
  def apply(idx: UInt): T = {
    if (x.size <= 1) {
      x.head
    } else if (!isPow2(x.size)) {
      // For non-power-of-2 seqs, reflect elements to simplify decoder
      (x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
    } else {
      // Ignore MSBs of idx
      val truncIdx =
        if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
        else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
      x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
    }
  }

  def extract(idx: UInt): T = VecInit(x).extract(idx)
  def asUInt: UInt = Cat(x.map(_.asUInt).reverse)

  // Static (Int-amount) and dynamic (UInt-amount) rotations.  The dynamic
  // forms build a log-depth network of 2-way muxes, one layer per amount bit,
  // so the sequence size must be a power of two.
  def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)

  def rotate(n: UInt): Seq[T] = {
    if (x.size <= 1) {
      x
    } else {
      require(isPow2(x.size))
      val amt = n.padTo(log2Ceil(x.size))
      (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
    }
  }

  def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)

  def rotateRight(n: UInt): Seq[T] = {
    if (x.size <= 1) {
      x
    } else {
      require(isPow2(x.size))
      val amt = n.padTo(log2Ceil(x.size))
      (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
    }
  }
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
  // & truncates to the shorter operand; | and ^ zero-pad the shorter one.
  def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
  def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
  def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
  def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
  def >> (n: Int): Seq[Bool] = x drop n
  def unary_~ : Seq[Bool] = x.map(!_)
  // Reductions; identity elements on empty input match UInt semantics.
  def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
  def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
  def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)

  private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
  /** Pass x through while enable is high; otherwise replay the last enabled value. */
  def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))

  /** Flatten this (possibly aggregate) Data into its leaf Elements. */
  def getElements: Seq[Element] = x match {
    case e: Element => Seq(e)
    case a: Aggregate => a.getElements.flatMap(_.getElements)
  }
}

/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }

implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
  /** Synchronous read whose result is held stable until the next enabled read completes. */
  def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
  /** Converts camelCase (or CamelCase) to snake_case, dropping all spaces. */
  def underscore: String = {
    val seed = x.headOption.map(_.toLower + "") getOrElse ""
    x.tail.foldLeft(seed) { (acc, c) =>
      if (c.isUpper) acc + "_" + c.toLower
      else if (c == ' ') acc
      else acc + c
    }
  }

  /** Converts spaces or underscores to hyphens, also lowering case. */
  def kebab: String = x.toLowerCase.map { c =>
    if (c == ' ' || c == '_') '-' else c
  }

  /** Append a naming suffix: "_named_<name>" when present, "_with_no_name" otherwise. */
  def named(name: Option[String]): String = name match {
    case Some(n) => x + "_named_" + n
    case None    => x + "_with_no_name"
  }

  def named(name: String): String = named(Some(name))
}
// Implicit views: UInt literals usable where a BitPat is expected, and a
// WideCounter readable directly as its UInt value.
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
  /** Sign-extend to exactly n bits (requires n >= current width). */
  def sextTo(n: Int): UInt = {
    require(x.getWidth <= n)
    if (x.getWidth == n) x
    else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
  }

  /** Zero-extend to exactly n bits (requires n >= current width). */
  def padTo(n: Int): UInt = {
    require(x.getWidth <= n)
    if (x.getWidth == n) x
    else Cat(0.U((n - x.getWidth).W), x)
  }

  // shifts left by n if n >= 0, or right by -n if n < 0
  def << (n: SInt): UInt = {
    val w = n.getWidth - 1
    require(w <= 30)

    val shifted = x << n(w-1, 0)
    Mux(n(w), shifted >> (1 << w), shifted)
  }

  // shifts right by n if n >= 0, or left by -n if n < 0
  def >> (n: SInt): UInt = {
    val w = n.getWidth - 1
    require(w <= 30)

    val shifted = x << (1 << w) >> n(w-1, 0)
    Mux(n(w), shifted, shifted >> (1 << w))
  }

  // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
  def extract(hi: Int, lo: Int): UInt = {
    require(hi >= lo-1)
    if (hi == lo-1) 0.U
    else x(hi, lo)
  }

  // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
  def extractOption(hi: Int, lo: Int): Option[UInt] = {
    require(hi >= lo-1)
    if (hi == lo-1) None
    else Some(x(hi, lo))
  }

  // like x & ~y, but first truncate or zero-extend y to x's width
  def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))

  // Static and dynamic rotations; dynamic forms build one mux layer per
  // amount bit.
  def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)

  def rotateRight(n: UInt): UInt = {
    if (x.getWidth <= 1) {
      x
    } else {
      val amt = n.padTo(log2Ceil(x.getWidth))
      (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
    }
  }

  def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))

  def rotateLeft(n: UInt): UInt = {
    if (x.getWidth <= 1) {
      x
    } else {
      val amt = n.padTo(log2Ceil(x.getWidth))
      (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
    }
  }

  // compute (this + y) % n, given (this < n) and (y < n)
  def addWrap(y: UInt, n: Int): UInt = {
    val z = x +& y
    if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
  }

  // compute (this - y) % n, given (this < n) and (y < n)
  def subWrap(y: UInt, n: Int): UInt = {
    val z = x -& y
    if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
  }

  /** Split x into width-bit slices, lowest slice first. */
  def grouped(width: Int): Seq[UInt] =
    (0 until x.getWidth by width).map(base => x(base + width - 1, base))

  def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds

  // Concatenate with an optional right operand; identity when absent.
  def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)

  // Like >=, but prevents x-prop for ('x >= 0)
  def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
  /** Concatenation that skips an absent left operand. */
  def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
  def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
  /** 1 when true, 0 when false. */
  def toInt: Int = if (!x) 0 else 1

  /** Some(z) when true, None when false; z is evaluated only when needed.
    * (Borrowed from scalaz.)
    */
  def option[T](z: => T): Option[T] = if (!x) None else Some(z)
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
  // exact log2
  def log2: Int = {
    require(isPow2(x))
    log2Ceil(x)
  }
}

// Converts a right-filled (OH1) mask such as 0b0111 into the one-hot 0b1000
// marking the position just above the top set bit (0b0001 for empty input).
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
// Encode x as a right-filled mask with the x low bits set.
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)

// Index of the least-significant set bit of a positive Int, else None.
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
  // Fill 1s from low bits to high bits
  def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
  // `cap` bounds the smear distance (and hence the number of doubling stages).
  def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
    val stop = min(width, cap)
    // Log-depth OR tree: each recursion doubles the span of the smear.
    def helper(s: Int, x: UInt): UInt =
      if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
    helper(1, x)(width-1, 0)
  }
  // Fill 1s from high bits to low bits
  def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
  // `cap` bounds the smear distance (and hence the number of doubling stages).
  def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
    val stop = min(width, cap)
    // Log-depth OR tree: each recursion doubles the span of the smear.
    def helper(s: Int, x: UInt): UInt =
      if (s >= stop) x else helper(s+s, x | (x >> s))
    helper(1, x)(width-1, 0)
  }
  /** Pass `in` through an anonymous single-wire module. The extra module
    * boundary acts as a barrier against cross-module optimizations in
    * downstream tools; the signal itself is unchanged.
    */
  def OptimizationBarrier[T <: Data](in: T): T = {
    val barrier = Module(new Module {
      val io = IO(new Bundle {
        val x = Input(chiselTypeOf(in))
        val y = Output(chiselTypeOf(in))
      })
      io.y := io.x
      override def desiredName = s"OptimizationBarrier_${in.typeName}"
    })
    barrier.io.x := in
    barrier.io.y
  }
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy; these forwarders keep old
  // call sites compiling.
  @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
  def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
  @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
  val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File dcache.scala:
//******************************************************************************
// Ported from Rocket-Chip
// See LICENSE.Berkeley and LICENSE.SiFive in Rocket-Chip for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.lsu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.tile._
import freechips.rocketchip.util._
import freechips.rocketchip.rocket._
import boom.v4.common._
import boom.v4.exu.BrUpdateInfo
import boom.v4.util._
/** Drains one cache line out of the L1 data array and sends it on the
  * TileLink C channel, either as a voluntary Release (eviction) or as a
  * ProbeAck with data (on behalf of the probe unit, per req.voluntary).
  *
  * FSM: s_invalid -> s_fill_buffer (read line from the data array)
  *   -> s_lsu_release (inform the LSU of the outgoing line)
  *   -> s_active (send the beats on the C channel)
  *   -> s_grant (voluntary releases wait here for the ReleaseAck).
  */
class BoomWritebackUnit(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
  val io = IO(new Bundle {
    val req = Flipped(Decoupled(new WritebackReq(edge.bundle)))
    val meta_read = Decoupled(new L1MetaReadReq)
    val resp = Output(Bool())
    val idx = Output(Valid(UInt()))
    val data_req = Decoupled(new L1DataReadReq)
    val data_resp = Input(UInt(encRowBits.W))
    val mem_grant = Input(Bool())
    val release = Decoupled(new TLBundleC(edge.bundle))
    val lsu_release = Decoupled(new TLBundleC(edge.bundle))
  })
  val req = Reg(new WritebackReq(edge.bundle))
  val s_invalid :: s_fill_buffer :: s_lsu_release :: s_active :: s_grant :: Nil = Enum(5)
  val state = RegInit(s_invalid)
  // r1/r2 form a two-stage shift register tracking in-flight data array reads:
  // read data for a request fired in cycle N is captured in cycle N+2
  // (SyncReadMem latency plus the RegNext in the data array).
  val r1_data_req_fired = RegInit(false.B)
  val r2_data_req_fired = RegInit(false.B)
  val r1_data_req_cnt = Reg(UInt(log2Up(refillCycles+1).W))
  val r2_data_req_cnt = Reg(UInt(log2Up(refillCycles+1).W))
  val data_req_cnt = RegInit(0.U(log2Up(refillCycles+1).W))
  val (_, last_beat, all_beats_done, beat_count) = edge.count(io.release)
  // Holds the entire line being written back, one entry per beat.
  val wb_buffer = Reg(Vec(refillCycles, UInt(encRowBits.W)))
  val acked = RegInit(false.B)
  // Advertise the index being written back so others can detect conflicts.
  io.idx.valid := state =/= s_invalid
  io.idx.bits := req.idx
  // Default (inactive) values; the active state overrides below.
  io.release.valid := false.B
  io.release.bits := DontCare
  io.req.ready := false.B
  io.meta_read.valid := false.B
  io.meta_read.bits := DontCare
  io.data_req.valid := false.B
  io.data_req.bits := DontCare
  io.resp := false.B
  io.lsu_release.valid := false.B
  io.lsu_release.bits := DontCare
  val r_address = Cat(req.tag, req.idx) << blockOffBits
  // Voluntary releases use the source ID one past the MSHR IDs.
  val id = cfg.nMSHRs
  val probeResponse = edge.ProbeAck(
    fromSource = req.source,
    toAddress = r_address,
    lgSize = lgCacheBlockBytes.U,
    reportPermissions = req.param,
    data = wb_buffer(data_req_cnt))
  val voluntaryRelease = edge.Release(
    fromSource = id.U,
    toAddress = r_address,
    lgSize = lgCacheBlockBytes.U,
    shrinkPermissions = req.param,
    data = wb_buffer(data_req_cnt))._2
  when (state === s_invalid) {
    // Idle: accept a new writeback request.
    io.req.ready := true.B
    when (io.req.fire) {
      state := s_fill_buffer
      data_req_cnt := 0.U
      req := io.req.bits
      acked := false.B
    }
  } .elsewhen (state === s_fill_buffer) {
    // Stream the line from the data array into wb_buffer, one row per cycle.
    // The meta read is issued in lockstep with the data read (the two
    // interfaces are coupled together in the dcache).
    io.meta_read.valid := data_req_cnt < refillCycles.U
    io.meta_read.bits.idx := req.idx
    io.meta_read.bits.tag := req.tag
    io.data_req.valid := data_req_cnt < refillCycles.U
    io.data_req.bits.way_en := req.way_en
    io.data_req.bits.addr := (if(refillCycles > 1)
                              Cat(req.idx, data_req_cnt(log2Up(refillCycles)-1,0))
                              else req.idx) << rowOffBits
    r1_data_req_fired := false.B
    r1_data_req_cnt := 0.U
    r2_data_req_fired := r1_data_req_fired
    r2_data_req_cnt := r1_data_req_cnt
    when (io.data_req.fire && io.meta_read.fire) {
      r1_data_req_fired := true.B
      r1_data_req_cnt := data_req_cnt
      data_req_cnt := data_req_cnt + 1.U
    }
    when (r2_data_req_fired) {
      // Read data for beat r2_data_req_cnt is valid this cycle; capture it.
      wb_buffer(r2_data_req_cnt) := io.data_resp
      when (r2_data_req_cnt === (refillCycles-1).U) {
        io.resp := true.B
        state := s_lsu_release
        data_req_cnt := 0.U
      }
    }
  } .elsewhen (state === s_lsu_release) {
    // Notify the LSU of the outgoing line (message shaped like the ProbeAck).
    io.lsu_release.valid := true.B
    io.lsu_release.bits := probeResponse
    when (io.lsu_release.fire) {
      state := s_active
    }
  } .elsewhen (state === s_active) {
    // Send the buffered beats out on the C channel.
    io.release.valid := data_req_cnt < refillCycles.U
    io.release.bits := Mux(req.voluntary, voluntaryRelease, probeResponse)
    when (io.mem_grant) {
      acked := true.B
    }
    when (io.release.fire) {
      data_req_cnt := data_req_cnt + 1.U
    }
    when ((data_req_cnt === (refillCycles-1).U) && io.release.fire) {
      // Voluntary releases must wait for a ReleaseAck; ProbeAcks are done.
      state := Mux(req.voluntary, s_grant, s_invalid)
    }
  } .elsewhen (state === s_grant) {
    // Wait for the ReleaseAck (io.mem_grant) before going idle.
    when (io.mem_grant) {
      acked := true.B
    }
    when (acked) {
      state := s_invalid
    }
  }
}
/** Services incoming TileLink B-channel Probes: reads the tag array,
  * coordinates with the MSHRs and the writeback unit, replies with a
  * ProbeAck (dirty hits are handed to the writeback unit, which sends the
  * ProbeAck with data), and finally updates the line's coherence metadata.
  */
class BoomProbeUnit(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
  val io = IO(new Bundle {
    val req = Flipped(Decoupled(new TLBundleB(edge.bundle)))
    val rep = Decoupled(new TLBundleC(edge.bundle))
    val meta_read = Decoupled(new L1MetaReadReq)
    val meta_write = Decoupled(new L1MetaWriteReq)
    val wb_req = Decoupled(new WritebackReq(edge.bundle))
    val way_en = Input(UInt(nWays.W))
    val wb_rdy = Input(Bool()) // Is writeback unit currently busy? If so need to retry meta read when its done
    val mshr_rdy = Input(Bool()) // Is MSHR ready for this request to proceed?
    val mshr_wb_rdy = Output(Bool()) // Should we block MSHR writebacks while we finish our own?
    val block_state = Input(new ClientMetadata())
    val lsu_release = Decoupled(new TLBundleC(edge.bundle))
    val state = Output(Valid(UInt(coreMaxAddrBits.W)))
  })
  val (s_invalid :: s_meta_read :: s_meta_resp :: s_mshr_req ::
       s_mshr_resp :: s_lsu_release :: s_release :: s_writeback_req :: s_writeback_resp ::
       s_meta_write :: s_meta_write_resp :: Nil) = Enum(11)
  val state = RegInit(s_invalid)
  val req = Reg(new TLBundleB(edge.bundle))
  val req_idx = req.address(idxMSB, idxLSB)
  val req_tag = req.address >> untagBits
  val way_en = Reg(UInt())
  // A non-zero way mask means the probed line is present in the cache.
  val tag_matches = way_en.orR
  val old_coh = Reg(new ClientMetadata)
  val miss_coh = ClientMetadata.onReset
  // Report the line's real state on a hit, the reset (Nothing) state on a miss.
  val reply_coh = Mux(tag_matches, old_coh, miss_coh)
  val (is_dirty, report_param, new_coh) = reply_coh.onProbe(req.param)
  // Expose the address being probed so others can detect conflicts.
  io.state.valid := state =/= s_invalid
  io.state.bits := req.address
  io.req.ready := state === s_invalid
  io.rep.valid := state === s_release
  io.rep.bits := edge.ProbeAck(req, report_param)
  assert(!io.rep.valid || !edge.hasData(io.rep.bits),
    "ProbeUnit should not send ProbeAcks with data, WritebackUnit should handle it")
  // Read every way of the probed set; the dcache reports the hit way back
  // through io.way_en.
  io.meta_read.valid := state === s_meta_read
  io.meta_read.bits.idx := req_idx
  io.meta_read.bits.tag := req_tag
  io.meta_read.bits.way_en := ~(0.U(nWays.W))
  // Metadata update with the post-probe coherence state.
  io.meta_write.valid := state === s_meta_write
  io.meta_write.bits.way_en := way_en
  io.meta_write.bits.idx := req_idx
  io.meta_write.bits.tag := req_tag
  io.meta_write.bits.data.tag := req_tag
  io.meta_write.bits.data.coh := new_coh
  // Dirty lines are handed to the writeback unit, which sends the
  // ProbeAck-with-data on our behalf (voluntary = false).
  io.wb_req.valid := state === s_writeback_req
  io.wb_req.bits.source := req.source
  io.wb_req.bits.idx := req_idx
  io.wb_req.bits.tag := req_tag
  io.wb_req.bits.param := report_param
  io.wb_req.bits.way_en := way_en
  io.wb_req.bits.voluntary := false.B
  io.mshr_wb_rdy := !state.isOneOf(s_release, s_writeback_req, s_writeback_resp, s_meta_write, s_meta_write_resp)
  io.lsu_release.valid := state === s_lsu_release
  io.lsu_release.bits := edge.ProbeAck(req, report_param)
  // state === s_invalid
  when (state === s_invalid) {
    when (io.req.fire) {
      state := s_meta_read
      req := io.req.bits
    }
  } .elsewhen (state === s_meta_read) {
    when (io.meta_read.fire) {
      state := s_meta_resp
    }
  } .elsewhen (state === s_meta_resp) {
    // we need to wait one cycle for the metadata to be read from the array
    state := s_mshr_req
  } .elsewhen (state === s_mshr_req) {
    old_coh := io.block_state
    way_en := io.way_en
    // if the read didn't go through, we need to retry
    state := Mux(io.mshr_rdy && io.wb_rdy, s_mshr_resp, s_meta_read)
  } .elsewhen (state === s_mshr_resp) {
    // Dirty hit: go through the writeback unit; otherwise notify the LSU and
    // then ack the probe directly.
    state := Mux(tag_matches && is_dirty, s_writeback_req, s_lsu_release)
  } .elsewhen (state === s_lsu_release) {
    when (io.lsu_release.fire) {
      state := s_release
    }
  } .elsewhen (state === s_release) {
    when (io.rep.ready) {
      // Only update metadata when the line was actually present.
      state := Mux(tag_matches, s_meta_write, s_invalid)
    }
  } .elsewhen (state === s_writeback_req) {
    when (io.wb_req.fire) {
      state := s_writeback_resp
    }
  } .elsewhen (state === s_writeback_resp) {
    // wait for the writeback request to finish before updating the metadata
    when (io.wb_req.ready) {
      state := s_meta_write
    }
  } .elsewhen (state === s_meta_write) {
    when (io.meta_write.fire) {
      state := s_meta_write_resp
    }
  } .elsewhen (state === s_meta_write_resp) {
    state := s_invalid
  }
}
/** Metadata-array read request: one L1MetaReadReq per LSU pipe. */
class BoomL1MetaReadReq(implicit p: Parameters) extends BoomBundle()(p) {
  val req = Vec(lsuWidth, new L1MetaReadReq)
}
/** Data-array read request: one L1DataReadReq per LSU pipe, with a
  * per-pipe valid bit.
  */
class BoomL1DataReadReq(implicit p: Parameters) extends BoomBundle()(p) {
  val req = Vec(lsuWidth, new L1DataReadReq)
  val valid = Vec(lsuWidth, Bool())
}
/** Common interface for the L1 data-array implementations: lsuWidth read
  * ports, one write port, responses for every way, and a per-port nack
  * (used by the banked implementation when accesses conflict).
  */
abstract class AbstractBoomDataArray(implicit p: Parameters) extends BoomModule with HasL1HellaCacheParameters {
  val io = IO(new BoomBundle {
    val read = Input(Vec(lsuWidth, Valid(new L1DataReadReq)))
    val write = Input(Valid(new L1DataWriteReq))
    val resp = Output(Vec(lsuWidth, Vec(nWays, Bits(encRowBits.W))))
    val s1_nacks = Output(Vec(lsuWidth, Bool()))
  })
  // Convenience: build a Vec by mapping a function over the LSU pipe indices.
  def pipeMap[T <: Data](f: Int => T) = VecInit((0 until lsuWidth).map(f))
}
/** Data array providing lsuWidth read ports by fully duplicating the SRAMs:
  * one copy of every way per LSU pipe. Reads never conflict, so this
  * implementation never nacks.
  */
class BoomDuplicatedDataArray(implicit p: Parameters) extends AbstractBoomDataArray
{
  val waddr = io.write.bits.addr >> rowOffBits
  for (j <- 0 until lsuWidth) {
    val raddr = io.read(j).bits.addr >> rowOffBits
    for (w <- 0 until nWays) {
      // One SRAM per (way, pipe); every copy sees every write.
      val array = DescribedSRAM(
        name = s"array_${w}_${j}",
        desc = "Non-blocking DCache Data Array",
        size = nSets * refillCycles,
        data = Vec(rowWords, Bits(encDataBits.W))
      )
      when (io.write.bits.way_en(w) && io.write.valid) {
        // Split the row into word lanes so wmask applies per word.
        val data = VecInit((0 until rowWords) map (i => io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i)))
        array.write(waddr, data, io.write.bits.wmask.asBools)
      }
      // Single-ported SRAMs additionally gate the read enable with this
      // way's read enable bit; dual-ported ones read whenever valid.
      if (dcacheSinglePorted)
        io.resp(j)(w) := RegNext(array.read(raddr, io.read(j).bits.way_en(w) && io.read(j).valid).asUInt)
      else
        io.resp(j)(w) := RegNext(array.read(raddr, io.read(j).valid).asUInt)
    }
    io.s1_nacks(j) := false.B
  }
}
/** Data array holding a single copy of the data banked across nBanks SRAMs.
  * Reads to distinct banks proceed in parallel; conflicting readers (and,
  * when single-ported, readers colliding with a write) are reported through
  * s1_nacks and must be retried by the LSU.
  */
class BoomBankedDataArray(implicit p: Parameters) extends AbstractBoomDataArray {
  val nBanks = boomParams.numDCacheBanks
  val bankSize = nSets * refillCycles / nBanks
  require (nBanks >= lsuWidth)
  require (bankSize > 0)
  val bankBits = log2Ceil(nBanks)
  val bankOffBits = log2Ceil(rowWords) + log2Ceil(wordBytes)
  val bidxBits = log2Ceil(bankSize)
  val bidxOffBits = bankOffBits + bankBits
  //----------------------------------------------------------------------------------------------------
  // S0: decode each request's target bank and bank-index, then arbitrate.
  val s0_rbanks = if (nBanks > 1) VecInit(io.read.map(r => (r.bits.addr >> bankOffBits)(bankBits-1,0))) else VecInit(0.U)
  val s0_wbank = if (nBanks > 1) (io.write.bits.addr >> bankOffBits)(bankBits-1,0) else 0.U
  val s0_ridxs = VecInit(io.read.map(r => (r.bits.addr >> bidxOffBits)(bidxBits-1,0)))
  val s0_widx = (io.write.bits.addr >> bidxOffBits)(bidxBits-1,0)
  val s0_read_valids = VecInit(io.read.map(_.valid))
  // A read conflicts when it targets the bank being written (single-ported
  // arrays only) or when an older (lower-index) pipe already claimed its bank.
  val s0_bank_conflicts = pipeMap(w => {
    ((s0_rbanks(w) === s0_wbank) && io.write.valid && dcacheSinglePorted.B) ||
    (0 until w).foldLeft(false.B)((c,i) => c || io.read(i).valid && s0_rbanks(i) === s0_rbanks(w))
  })
  val s0_do_bank_read = s0_read_valids zip s0_bank_conflicts map {case (v,c) => v && !c}
  // Per-bank one-hot grant vectors (transposed to be indexed by bank).
  val s0_bank_read_gnts = Transpose(VecInit(s0_rbanks zip s0_do_bank_read map {case (b,d) => VecInit((UIntToOH(b) & Fill(nBanks,d)).asBools)}))
  val s0_bank_write_gnt = (UIntToOH(s0_wbank) & Fill(nBanks, io.write.valid)).asBools
  //----------------------------------------------------------------------------------------------------
  // S1: decide which pipes must be nacked. A losing pipe is forgiven when the
  // winning (older) pipe read the same bank at the same index, since it can
  // share that read's data.
  val s1_rbanks = RegNext(s0_rbanks)
  val s1_ridxs = RegNext(s0_ridxs)
  val s1_read_valids = RegNext(s0_read_valids)
  // For each pipe, one-hot select of the oldest pipe reading the same bank.
  val s1_pipe_selection = pipeMap(i => VecInit(PriorityEncoderOH(pipeMap(j =>
    if (j < i) s1_read_valids(j) && s1_rbanks(j) === s1_rbanks(i)
    else if (j == i) true.B else false.B))))
  val s1_ridx_match = pipeMap(i => pipeMap(j => if (j < i) s1_ridxs(j) === s1_ridxs(i)
                                                else if (j == i) true.B else false.B))
  val s1_nacks = pipeMap(w => s1_read_valids(w)
    && (!RegNext(s0_do_bank_read(w)) || (s1_pipe_selection(w).asUInt & ~s1_ridx_match(w).asUInt).orR)
  )
  val s1_bank_selection = pipeMap(w => Mux1H(s1_pipe_selection(w), s1_rbanks))
  //----------------------------------------------------------------------------------------------------
  // S2: route each bank's read data back to the pipe(s) that selected it.
  val s2_bank_selection = RegNext(s1_bank_selection)
  io.s1_nacks := s1_nacks
  val data_arrays = Seq.tabulate(nBanks) {
    b => DescribedSRAM(
      name = s"array_${b}",
      desc = "Boom DCache data array",
      size = bankSize,
      data = Vec(nWays * rowWords, Bits(encDataBits.W))
    )
  }
  val s2_bank_reads = Reg(Vec(nBanks, Vec(nWays, Bits(encRowBits.W))))
  for (b <- 0 until nBanks) {
    val array = data_arrays(b)
    val ridx = Mux1H(s0_bank_read_gnts(b), s0_ridxs)
    val way_en = Mux1H(s0_bank_read_gnts(b), io.read.map(_.bits.way_en))
    val write_en = s0_bank_write_gnt(b)
    // Per-way write mask: wmask is replicated into the word lanes of each
    // enabled way, zero elsewhere.
    val write_mask = Cat(Seq.tabulate(nWays) { w =>
      Mux(io.write.bits.way_en(w), io.write.bits.wmask, 0.U(rowWords.W))
    }.reverse).asBools
    val read_en = WireInit(s0_bank_read_gnts(b).reduce(_||_))
    // Single-ported arrays must never read and write in the same cycle.
    s2_bank_reads(b) := (if (dcacheSinglePorted) {
      assert(!(read_en && write_en))
      array.read(ridx, !write_en && read_en)
    } else {
      array.read(ridx, read_en)
    }).asTypeOf(Vec(nWays, Bits(encRowBits.W)))
    when (write_en) {
      // Broadcast the row data into every way's lanes; write_mask selects
      // which way (and which words within it) are actually written.
      val data = Wire(Vec(nWays * rowWords, Bits(encDataBits.W)))
      for (w <- 0 until nWays) {
        for (i <- 0 until rowWords) {
          data(w*rowWords+i) := io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i)
        }
      }
      array.write(s0_widx, data, write_mask)
    }
  }
  for (w <- 0 until nWays) {
    for (i <- 0 until lsuWidth) {
      io.resp(i)(w) := s2_bank_reads(s2_bank_selection(i))(w)
    }
  }
}
/**
 * Top level class wrapping a non-blocking dcache.
 *
 * @param staticIdForMetadataUseOnly hardware thread (hart) id; used only for
 *        metadata such as TileLink client names, never for logic
 */
class BoomNonBlockingDCache(staticIdForMetadataUseOnly: Int)(implicit p: Parameters) extends LazyModule
{
  private val tileParams = p(TileKey)
  protected val cfg = tileParams.dcache.get
  // Cached (probe-capable) client port; omitted when configured as a scratchpad.
  // Source IDs 0..nMSHRs are cache traffic (nMSHRs is the writeback unit).
  protected def cacheClientParameters = cfg.scratch.map(x => Seq()).getOrElse(Seq(TLMasterParameters.v1(
    name = s"Core ${staticIdForMetadataUseOnly} DCache",
    sourceId = IdRange(0, 1 max (cfg.nMSHRs + 1)),
    supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes))))
  // Uncached client for MMIO; responses must come back in FIFO order.
  protected def mmioClientParameters = Seq(TLMasterParameters.v1(
    name = s"Core ${staticIdForMetadataUseOnly} DCache MMIO",
    sourceId = IdRange(cfg.nMSHRs + 1, cfg.nMSHRs + 1 + cfg.nMMIOs),
    requestFifo = true))
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    cacheClientParameters ++ mmioClientParameters,
    minLatency = 1)))
  lazy val module = new BoomNonBlockingDCacheModule(this)
  // fence.i must flush this D$ unless every executable manager region is
  // tracked or idempotent (i.e. instructions cannot go stale in the cache).
  def flushOnFenceI = cfg.scratch.isEmpty && !node.edges.out(0).manager.managers.forall(m => !m.supportsAcquireT || !m.executable || m.regionType >= RegionType.TRACKED || m.regionType <= RegionType.IDEMPOTENT)
  require(!tileParams.core.haveCFlush || cfg.scratch.isEmpty, "CFLUSH_D_L1 instruction requires a D$")
}
/** Top-level dcache IO: error reporting plus the LSU-facing memory interface. */
class BoomDCacheBundle(implicit p: Parameters, edge: TLEdgeOut) extends BoomBundle()(p) {
  val errors = new DCacheErrors
  val lsu = Flipped(new LSUDMemIO)
}
class BoomNonBlockingDCacheModule(outer: BoomNonBlockingDCache) extends LazyModuleImp(outer)
with HasL1HellaCacheParameters
with HasBoomCoreParameters
{
implicit val edge = outer.node.edges.out(0)
val (tl_out, _) = outer.node.out(0)
val io = IO(new BoomDCacheBundle)
io.errors := DontCare
private val fifoManagers = edge.manager.managers.filter(TLFIFOFixer.allVolatile)
fifoManagers.foreach { m =>
require (m.fifoId == fifoManagers.head.fifoId,
s"IOMSHRs must be FIFO for all regions with effects, but HellaCache sees ${m.nodePath.map(_.name)}")
}
def widthMap[T <: Data](f: Int => T) = VecInit((0 until lsuWidth).map(f))
val t_replay :: t_probe :: t_wb :: t_mshr_meta_read :: t_lsu :: t_prefetch :: Nil = Enum(6)
val wb = Module(new BoomWritebackUnit)
val prober = Module(new BoomProbeUnit)
val mshrs = Module(new BoomMSHRFile)
mshrs.io.clear_all := io.lsu.force_order
mshrs.io.brupdate := io.lsu.brupdate
mshrs.io.exception := io.lsu.exception
mshrs.io.rob_pnr_idx := io.lsu.rob_pnr_idx
mshrs.io.rob_head_idx := io.lsu.rob_head_idx
// tags
def onReset = L1Metadata(0.U, ClientMetadata.onReset)
val meta = Seq.fill(lsuWidth) { Module(new L1MetadataArray(onReset _)) }
val metaWriteArb = Module(new Arbiter(new L1MetaWriteReq, 2))
// 0 goes to MSHR refills, 1 goes to prober
val metaReadArb = Module(new Arbiter(new BoomL1MetaReadReq, 6))
// 0 goes to MSHR replays, 1 goes to prober, 2 goes to wb, 3 goes to MSHR meta read,
// 4 goes to pipeline, 5 goes to prefetcher
metaReadArb.io.in := DontCare
for (w <- 0 until lsuWidth) {
meta(w).io.write.valid := metaWriteArb.io.out.fire
meta(w).io.write.bits := metaWriteArb.io.out.bits
meta(w).io.read.valid := metaReadArb.io.out.valid
meta(w).io.read.bits := metaReadArb.io.out.bits.req(w)
}
metaReadArb.io.out.ready := meta.map(_.io.read.ready).reduce(_||_)
metaWriteArb.io.out.ready := meta.map(_.io.write.ready).reduce(_||_)
// data
val data = Module(if (boomParams.numDCacheBanks == 1) new BoomDuplicatedDataArray else new BoomBankedDataArray)
val dataWriteArb = Module(new Arbiter(new L1DataWriteReq, 2))
// 0 goes to pipeline, 1 goes to MSHR refills
val dataReadArb = Module(new Arbiter(new BoomL1DataReadReq, 3))
// 0 goes to MSHR replays, 1 goes to wb, 2 goes to pipeline
dataReadArb.io.in := DontCare
for (w <- 0 until lsuWidth) {
data.io.read(w).valid := dataReadArb.io.out.bits.valid(w) && dataReadArb.io.out.valid
data.io.read(w).bits := dataReadArb.io.out.bits.req(w)
}
dataReadArb.io.out.ready := true.B
data.io.write.valid := dataWriteArb.io.out.fire
data.io.write.bits := dataWriteArb.io.out.bits
dataWriteArb.io.out.ready := true.B
val singlePortedDCacheWrite = data.io.write.valid && dcacheSinglePorted.B
// ------------
// New requests
// In a 1-wide LSU, load/store wakeups and MSHR resps contend for same port, so
// we should block incoming requests when the MSHR trying to respond
val block_incoming_reqs = (lsuWidth == 1).B && mshrs.io.resp.valid
io.lsu.req.ready := metaReadArb.io.in(4).ready && dataReadArb.io.in(2).ready && !block_incoming_reqs
metaReadArb.io.in(4).valid := io.lsu.req.valid && !block_incoming_reqs
dataReadArb.io.in(2).valid := io.lsu.req.valid && !block_incoming_reqs
for (w <- 0 until lsuWidth) {
// Tag read for new requests
metaReadArb.io.in(4).bits.req(w).idx := io.lsu.req.bits(w).bits.addr >> blockOffBits
metaReadArb.io.in(4).bits.req(w).way_en := DontCare
metaReadArb.io.in(4).bits.req(w).tag := DontCare
// Data read for new requests
dataReadArb.io.in(2).bits.valid(w) := io.lsu.req.bits(w).valid
dataReadArb.io.in(2).bits.req(w).addr := io.lsu.req.bits(w).bits.addr
dataReadArb.io.in(2).bits.req(w).way_en := ~0.U(nWays.W)
}
// ------------
// MSHR Replays
val replay_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
replay_req := DontCare
replay_req(0).uop := mshrs.io.replay.bits.uop
replay_req(0).addr := mshrs.io.replay.bits.addr
replay_req(0).data := mshrs.io.replay.bits.data
replay_req(0).is_hella := mshrs.io.replay.bits.is_hella
// Don't let replays get nacked due to conflict with dcache write
mshrs.io.replay.ready := metaReadArb.io.in(0).ready && dataReadArb.io.in(0).ready && !singlePortedDCacheWrite
// Tag read for MSHR replays
// We don't actually need to read the metadata, for replays we already know our way
metaReadArb.io.in(0).valid := mshrs.io.replay.valid && !singlePortedDCacheWrite
metaReadArb.io.in(0).bits.req(0).idx := mshrs.io.replay.bits.addr >> blockOffBits
metaReadArb.io.in(0).bits.req(0).way_en := DontCare
metaReadArb.io.in(0).bits.req(0).tag := DontCare
// Data read for MSHR replays
dataReadArb.io.in(0).valid := mshrs.io.replay.valid && !singlePortedDCacheWrite
dataReadArb.io.in(0).bits.req(0).addr := mshrs.io.replay.bits.addr
dataReadArb.io.in(0).bits.req(0).way_en := mshrs.io.replay.bits.way_en
dataReadArb.io.in(0).bits.valid := widthMap(w => (w == 0).B)
// -----------
// MSHR Meta read
val mshr_read_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
mshr_read_req := DontCare
mshr_read_req(0).uop := NullMicroOp
mshr_read_req(0).addr := Cat(mshrs.io.meta_read.bits.tag, mshrs.io.meta_read.bits.idx) << blockOffBits
mshr_read_req(0).data := DontCare
mshr_read_req(0).is_hella := false.B
metaReadArb.io.in(3).valid := mshrs.io.meta_read.valid
metaReadArb.io.in(3).bits.req(0) := mshrs.io.meta_read.bits
mshrs.io.meta_read.ready := metaReadArb.io.in(3).ready
// -----------
// Write-backs
val wb_fire = wb.io.meta_read.fire && wb.io.data_req.fire
val wb_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
wb_req := DontCare
wb_req(0).uop := NullMicroOp
wb_req(0).addr := Cat(wb.io.meta_read.bits.tag, wb.io.data_req.bits.addr)
wb_req(0).data := DontCare
wb_req(0).is_hella := false.B
// Couple the two decoupled interfaces of the WBUnit's meta_read and data_read
// Can't launch data read if possibility of conflict w. write
// Tag read for write-back
metaReadArb.io.in(2).valid := wb.io.meta_read.valid && !singlePortedDCacheWrite
metaReadArb.io.in(2).bits.req(0) := wb.io.meta_read.bits
wb.io.meta_read.ready := metaReadArb.io.in(2).ready && dataReadArb.io.in(1).ready && !singlePortedDCacheWrite
// Data read for write-back
dataReadArb.io.in(1).valid := wb.io.data_req.valid && !singlePortedDCacheWrite
dataReadArb.io.in(1).bits.req(0) := wb.io.data_req.bits
dataReadArb.io.in(1).bits.valid := widthMap(w => (w == 0).B)
wb.io.data_req.ready := metaReadArb.io.in(2).ready && dataReadArb.io.in(1).ready && !singlePortedDCacheWrite
assert(!(wb.io.meta_read.fire ^ wb.io.data_req.fire))
// -------
// Prober
val prober_fire = prober.io.meta_read.fire
val prober_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
prober_req := DontCare
prober_req(0).uop := NullMicroOp
prober_req(0).addr := Cat(prober.io.meta_read.bits.tag, prober.io.meta_read.bits.idx) << blockOffBits
prober_req(0).data := DontCare
prober_req(0).is_hella := false.B
// Tag read for prober
metaReadArb.io.in(1).valid := prober.io.meta_read.valid
metaReadArb.io.in(1).bits.req(0) := prober.io.meta_read.bits
prober.io.meta_read.ready := metaReadArb.io.in(1).ready
// Prober does not need to read data array
// -------
// Prefetcher
val prefetch_fire = mshrs.io.prefetch.fire
val prefetch_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
prefetch_req := DontCare
prefetch_req(0) := mshrs.io.prefetch.bits
// Tag read for prefetch
metaReadArb.io.in(5).valid := mshrs.io.prefetch.valid
metaReadArb.io.in(5).bits.req(0).idx := mshrs.io.prefetch.bits.addr >> blockOffBits
metaReadArb.io.in(5).bits.req(0).way_en := DontCare
metaReadArb.io.in(5).bits.req(0).tag := DontCare
mshrs.io.prefetch.ready := metaReadArb.io.in(5).ready
// Prefetch does not need to read data array
val s0_valid = Mux(io.lsu.req.fire, VecInit(io.lsu.req.bits.map(_.valid)),
Mux(mshrs.io.replay.fire || wb_fire || prober_fire || prefetch_fire || mshrs.io.meta_read.fire,
VecInit(1.U(lsuWidth.W).asBools), VecInit(0.U(lsuWidth.W).asBools)))
val s0_req = Mux(io.lsu.req.fire , VecInit(io.lsu.req.bits.map(_.bits)),
Mux(wb_fire , wb_req,
Mux(prober_fire , prober_req,
Mux(prefetch_fire , prefetch_req,
Mux(mshrs.io.meta_read.fire, mshr_read_req
, replay_req)))))
val s0_type = Mux(io.lsu.req.fire , t_lsu,
Mux(wb_fire , t_wb,
Mux(prober_fire , t_probe,
Mux(prefetch_fire , t_prefetch,
Mux(mshrs.io.meta_read.fire, t_mshr_meta_read
, t_replay)))))
// Does this request need to send a response or nack
val s0_send_resp_or_nack = Mux(io.lsu.req.fire, s0_valid,
VecInit(Mux(mshrs.io.replay.fire && isRead(mshrs.io.replay.bits.uop.mem_cmd), 1.U(lsuWidth.W), 0.U(lsuWidth.W)).asBools))
val s1_req = RegNext(s0_req)
for (w <- 0 until lsuWidth)
s1_req(w).uop.br_mask := GetNewBrMask(io.lsu.brupdate, s0_req(w).uop)
val s2_store_failed = Wire(Bool())
val s1_valid = widthMap(w =>
RegNext(s0_valid(w) &&
!IsKilledByBranch(io.lsu.brupdate, false.B, s0_req(w).uop) &&
!(io.lsu.exception && s0_req(w).uop.uses_ldq) &&
!(s2_store_failed && io.lsu.req.fire && s0_req(w).uop.uses_stq),
init=false.B))
for (w <- 0 until lsuWidth)
assert(!(io.lsu.s1_kill(w) && !RegNext(io.lsu.req.fire) && !RegNext(io.lsu.req.bits(w).valid)))
val s1_addr = s1_req.map(_.addr)
val s1_nack = s1_addr.map(a => a(idxMSB,idxLSB) === prober.io.meta_write.bits.idx && !prober.io.req.ready)
val s1_send_resp_or_nack = RegNext(s0_send_resp_or_nack)
val s1_type = RegNext(s0_type)
val s1_mshr_meta_read_way_en = RegNext(mshrs.io.meta_read.bits.way_en)
val s1_replay_way_en = RegNext(mshrs.io.replay.bits.way_en) // For replays, the metadata isn't written yet
val s1_wb_way_en = RegNext(wb.io.data_req.bits.way_en)
// tag check
def wayMap[T <: Data](f: Int => T) = VecInit((0 until nWays).map(f))
val s1_tag_eq_way = widthMap(i => wayMap((w: Int) => meta(i).io.resp(w).tag === (s1_addr(i) >> untagBits)).asUInt)
val s1_tag_match_way = widthMap(i =>
Mux(s1_type === t_replay, s1_replay_way_en,
Mux(s1_type === t_wb, s1_wb_way_en,
Mux(s1_type === t_mshr_meta_read, s1_mshr_meta_read_way_en,
wayMap((w: Int) => s1_tag_eq_way(i)(w) && meta(i).io.resp(w).coh.isValid()).asUInt))))
val s1_wb_idx_matches = widthMap(i => (s1_addr(i)(untagBits-1,blockOffBits) === wb.io.idx.bits) && wb.io.idx.valid)
for (w <- 0 until lsuWidth) {
io.lsu.s1_nack_advisory(w) := data.io.s1_nacks(w)
}
val s2_req = RegNext(s1_req)
val s2_type = RegNext(s1_type)
val s2_valid = widthMap(w =>
RegNext(s1_valid(w) &&
!io.lsu.s1_kill(w) &&
!IsKilledByBranch(io.lsu.brupdate, false.B, s1_req(w).uop) &&
!(io.lsu.exception && s1_req(w).uop.uses_ldq) &&
!(s2_store_failed && (s1_type === t_lsu) && s1_req(w).uop.uses_stq)))
for (w <- 0 until lsuWidth)
s2_req(w).uop.br_mask := GetNewBrMask(io.lsu.brupdate, s1_req(w).uop)
val s2_tag_match_way = RegNext(s1_tag_match_way)
val s2_tag_match = s2_tag_match_way.map(_.orR)
val s2_hit_state = widthMap(i => Mux1H(s2_tag_match_way(i), wayMap((w: Int) => RegNext(meta(i).io.resp(w).coh))))
val s2_has_permission = widthMap(w => s2_hit_state(w).onAccess(s2_req(w).uop.mem_cmd)._1)
val s2_new_hit_state = widthMap(w => s2_hit_state(w).onAccess(s2_req(w).uop.mem_cmd)._3)
val s2_hit = widthMap(w => (s2_tag_match(w) && s2_has_permission(w) && s2_hit_state(w) === s2_new_hit_state(w) && !mshrs.io.block_hit(w)) || s2_type.isOneOf(t_replay, t_wb))
val s2_nack = Wire(Vec(lsuWidth, Bool()))
assert(!(s2_type === t_replay && !s2_hit(0)), "Replays should always hit")
assert(!(s2_type === t_wb && !s2_hit(0)), "Writeback should always see data hit")
val s2_wb_idx_matches = RegNext(s1_wb_idx_matches)
// lr/sc
val debug_sc_fail_addr = RegInit(0.U)
val debug_sc_fail_cnt = RegInit(0.U(8.W))
val lrsc_count = RegInit(0.U(log2Ceil(lrscCycles).W))
val lrsc_valid = lrsc_count > lrscBackoff.U
val lrsc_addr = Reg(UInt())
val s2_lr = s2_req(0).uop.mem_cmd === M_XLR && (!RegNext(s1_nack(0)) || s2_type === t_replay)
val s2_sc = s2_req(0).uop.mem_cmd === M_XSC && (!RegNext(s1_nack(0)) || s2_type === t_replay)
val s2_lrsc_addr_match = widthMap(w => lrsc_valid && lrsc_addr === (s2_req(w).addr >> blockOffBits))
val s2_sc_fail = s2_sc && !s2_lrsc_addr_match(0)
when (lrsc_count > 0.U) { lrsc_count := lrsc_count - 1.U }
when (s2_valid(0) && ((s2_type === t_lsu && s2_hit(0) && !s2_nack(0)) ||
(s2_type === t_replay && s2_req(0).uop.mem_cmd =/= M_FLUSH_ALL))) {
when (s2_lr) {
lrsc_count := (lrscCycles - 1).U
lrsc_addr := s2_req(0).addr >> blockOffBits
}
when (lrsc_count > 0.U) {
lrsc_count := 0.U
}
}
for (w <- 0 until lsuWidth) {
when (s2_valid(w) &&
s2_type === t_lsu &&
!s2_hit(w) &&
!(s2_has_permission(w) && s2_tag_match(w)) &&
s2_lrsc_addr_match(w) &&
!s2_nack(w)) {
lrsc_count := 0.U
}
}
when (s2_valid(0)) {
when (s2_req(0).addr === debug_sc_fail_addr) {
when (s2_sc_fail) {
debug_sc_fail_cnt := debug_sc_fail_cnt + 1.U
} .elsewhen (s2_sc) {
debug_sc_fail_cnt := 0.U
}
} .otherwise {
when (s2_sc_fail) {
debug_sc_fail_addr := s2_req(0).addr
debug_sc_fail_cnt := 1.U
}
}
}
assert(debug_sc_fail_cnt < 100.U, "L1DCache failed too many SCs in a row")
val s2_data = Wire(Vec(lsuWidth, Vec(nWays, UInt(encRowBits.W))))
for (i <- 0 until lsuWidth) {
for (w <- 0 until nWays) {
s2_data(i)(w) := data.io.resp(i)(w)
}
}
val s2_data_muxed = widthMap(w => Mux1H(s2_tag_match_way(w), s2_data(w)))
val s2_word_idx = widthMap(w => if (rowWords == 1) 0.U else s2_req(w).addr(log2Up(rowWords*wordBytes)-1, log2Up(wordBytes)))
// replacement policy
val replacer = cacheParams.replacement
val s1_replaced_way_en = UIntToOH(replacer.way)
val s2_replaced_way_en = UIntToOH(RegNext(replacer.way))
val s2_repl_meta = widthMap(i => Mux1H(s2_replaced_way_en, wayMap((w: Int) => RegNext(meta(i).io.resp(w))).toSeq))
// nack because of incoming probe
val s2_nack_hit = RegNext(VecInit(s1_nack))
// Nack when we hit something currently being evicted
val s2_nack_victim = widthMap(w => s2_valid(w) && s2_hit(w) && mshrs.io.secondary_miss(w))
// MSHRs not ready for request
val s2_nack_miss = widthMap(w => s2_valid(w) && !s2_hit(w) && !mshrs.io.req(w).ready)
// Bank conflict on data arrays
val s2_nack_data = widthMap(w => s2_valid(w) && RegNext(data.io.s1_nacks(w)))
// Can't allocate MSHR for same set currently being written back
val s2_nack_wb = widthMap(w => s2_valid(w) && !s2_hit(w) && s2_wb_idx_matches(w))
s2_nack := widthMap(w => (s2_nack_miss(w) || s2_nack_hit(w) || s2_nack_victim(w) || s2_nack_data(w) || s2_nack_wb(w)) && s2_type =/= t_replay)
assert(!(s2_nack_data.reduce(_||_) && s2_type.isOneOf(t_replay, t_wb)))
val s2_send_resp = widthMap(w => (
RegNext(s1_send_resp_or_nack(w)) &&
(!(s2_nack_hit(w) || s2_nack_victim(w) || s2_nack_data(w)) || s2_type === t_replay) &&
s2_hit(w) && isRead(s2_req(w).uop.mem_cmd)
))
val s2_send_store_ack = widthMap(w => (
RegNext(s1_send_resp_or_nack(w)) && !s2_nack(w) && isWrite(s2_req(w).uop.mem_cmd) &&
(s2_hit(w) || mshrs.io.req(w).fire)))
val s2_send_nack = widthMap(w => (RegNext(s1_send_resp_or_nack(w)) && s2_nack(w)))
for (w <- 0 until lsuWidth)
assert(!(s2_send_resp(w) && s2_send_nack(w)))
// hits always send a response
// If MSHR is not available, LSU has to replay this request later
// If MSHR is available and this is only a store(not a amo), we don't need to wait for resp later
s2_store_failed := s2_valid(0) && s2_nack(0) && s2_send_nack(0) && s2_req(0).uop.uses_stq
// Miss handling
// One potential MSHR-allocation request per LSU pipe. A request is eligible
// only if it genuinely missed, was not nacked for any reason, came from the
// LSU or prefetcher, and is a memory operation an MSHR can service.
for (w <- 0 until lsuWidth) {
mshrs.io.req(w).valid := s2_valid(w) &&
!s2_hit(w) &&
!s2_nack_hit(w) &&
!s2_nack_victim(w) &&
!s2_nack_data(w) &&
!s2_nack_wb(w) &&
s2_type.isOneOf(t_lsu, t_prefetch) &&
!(io.lsu.exception && s2_req(w).uop.uses_ldq) &&
(isPrefetch(s2_req(w).uop.mem_cmd) ||
isRead(s2_req(w).uop.mem_cmd) ||
isWrite(s2_req(w).uop.mem_cmd))
assert(!(mshrs.io.req(w).valid && s2_type === t_replay), "Replays should not need to go back into MSHRs")
// Bulk-default the request, then override the fields we actually drive.
mshrs.io.req(w).bits := DontCare
mshrs.io.req(w).bits.uop := s2_req(w).uop
mshrs.io.req(w).bits.addr := s2_req(w).addr
mshrs.io.req(w).bits.tag_match := s2_tag_match(w)
// On a tag match the "old" metadata is the matching line's tag with its
// current coherence state; otherwise it is the victim way's metadata.
mshrs.io.req(w).bits.old_meta := Mux(s2_tag_match(w), L1Metadata(s2_repl_meta(w).tag, s2_hit_state(w)), s2_repl_meta(w))
mshrs.io.req(w).bits.way_en := Mux(s2_tag_match(w), s2_tag_match_way(w), s2_replaced_way_en)
mshrs.io.req(w).bits.data := s2_req(w).data
mshrs.io.req(w).bits.is_hella := s2_req(w).is_hella
mshrs.io.req_is_probe(w) := s2_type === t_probe && s2_valid(w)
}
// Metadata readout for the MSHRs uses pipe 0's tag-match results only.
mshrs.io.meta_resp.valid := !s2_nack_hit(0) || prober.io.mshr_wb_rdy
mshrs.io.meta_resp.bits := Mux1H(s2_tag_match_way(0), RegNext(meta(0).io.resp))
// Update replacement state whenever any pipe allocates an MSHR (a miss).
when (mshrs.io.req.map(_.fire).reduce(_||_)) { replacer.miss }
tl_out.a <> mshrs.io.mem_acquire
// probes and releases
// Incoming probes are stalled while an LR/SC reservation is live.
prober.io.req.valid := tl_out.b.valid && !lrsc_valid
tl_out.b.ready := prober.io.req.ready && !lrsc_valid
prober.io.req.bits := tl_out.b.bits
prober.io.way_en := s2_tag_match_way(0)
prober.io.block_state := s2_hit_state(0)
metaWriteArb.io.in(1) <> prober.io.meta_write
prober.io.mshr_rdy := mshrs.io.probe_rdy
// Hold the prober off while the writeback unit is busy with the same index.
prober.io.wb_rdy := (prober.io.meta_write.bits.idx =/= wb.io.idx.bits) || !wb.io.idx.valid
mshrs.io.prober_state := prober.io.state
// refills
// Source id cfg.nMSHRs is reserved for writeback traffic, so a D-channel
// beat with that source is a ReleaseAck rather than refill data.
when (tl_out.d.bits.source === cfg.nMSHRs.U) {
// This should be ReleaseAck
tl_out.d.ready := true.B
mshrs.io.mem_grant.valid := false.B
mshrs.io.mem_grant.bits := DontCare
} .otherwise {
// This should be GrantData
mshrs.io.mem_grant <> tl_out.d
}
dataWriteArb.io.in(1) <> mshrs.io.refill
metaWriteArb.io.in(0) <> mshrs.io.meta_write
tl_out.e <> mshrs.io.mem_finish
// writebacks
val wbArb = Module(new Arbiter(new WritebackReq(edge.bundle), 2))
// 0 goes to prober, 1 goes to MSHR evictions
wbArb.io.in(0) <> prober.io.wb_req
wbArb.io.in(1) <> mshrs.io.wb_req
wb.io.req <> wbArb.io.out
// Writeback data is read through pipe 0's data mux.
wb.io.data_resp := s2_data_muxed(0)
mshrs.io.wb_resp := wb.io.resp
// The writeback unit's ReleaseAck arrives on D with the reserved source id.
wb.io.mem_grant := tl_out.d.fire && tl_out.d.bits.source === cfg.nMSHRs.U
// Copies of outgoing C-channel releases are forwarded to the LSU
// (NOTE(review): presumably so the LSU can snoop evictions — confirm at the LSU side).
val lsu_release_arb = Module(new Arbiter(new TLBundleC(edge.bundle), 2))
io.lsu.release <> lsu_release_arb.io.out
lsu_release_arb.io.in(0) <> wb.io.lsu_release
lsu_release_arb.io.in(1) <> prober.io.lsu_release
// C channel: arbitrate between writeback-unit releases and prober responses.
TLArbiter.lowest(edge, tl_out.c, wb.io.release, prober.io.rep)
io.lsu.perf.release := edge.done(tl_out.c)
io.lsu.perf.acquire := edge.done(tl_out.a)
// load data gen
// Shift the muxed row down to the addressed word; bypass muxing happens later.
val s2_data_word_prebypass = widthMap(w => s2_data_muxed(w) >> Cat(s2_word_idx(w), 0.U(log2Ceil(coreDataBits).W)))
val s2_data_word = Wire(Vec(lsuWidth, UInt()))
// One load-data generator (sign/zero extension, sub-word select) per pipe;
// the SC-result substitution only applies on pipe 0.
val loadgen = (0 until lsuWidth).map { w =>
new LoadGen(s2_req(w).uop.mem_size, s2_req(w).uop.mem_signed, s2_req(w).addr,
s2_data_word(w), s2_sc && (w == 0).B, wordBytes)
}
// Mux between cache responses and uncache responses
for (w <- 0 until lsuWidth) {
io.lsu.resp(w).valid := s2_valid(w) && s2_send_resp(w)
io.lsu.resp(w).bits.uop := s2_req(w).uop
// OR-in the SC failure flag (nonzero only when a store-conditional failed).
io.lsu.resp(w).bits.data := loadgen(w).data | s2_sc_fail
io.lsu.resp(w).bits.is_hella := s2_req(w).is_hella
io.lsu.nack(w).valid := s2_valid(w) && s2_send_nack(w)
io.lsu.nack(w).bits := s2_req(w)
assert(!(io.lsu.nack(w).valid && s2_type =/= t_lsu))
// Store acks are only ever produced on pipe 0.
io.lsu.store_ack(w).valid := s2_valid(w) && s2_send_store_ack(w) && (w == 0).B
io.lsu.store_ack(w).bits := s2_req(w)
}
// Long-latency (MSHR/uncached) responses go straight to the LSU.
io.lsu.ll_resp <> mshrs.io.resp
// Store/amo hits
// s3: the store/AMO write-hit stage. Only pipe 0 may carry stores.
val s3_req = Wire(new BoomDCacheReq)
s3_req := RegNext(s2_req(0))
val s3_valid = RegNext(s2_valid(0) && s2_hit(0) && isWrite(s2_req(0).uop.mem_cmd) &&
!s2_sc_fail && !(s2_send_nack(0) && s2_nack(0)))
val s3_data_word = RegNext(s2_data_word(0))
for (w <- 1 until lsuWidth) {
assert(!(s2_valid(w) && s2_hit(w) && isWrite(s2_req(w).uop.mem_cmd) &&
!s2_sc_fail && !(s2_send_nack(w) && s2_nack(w))),
"Store must go through 0th pipe in L1D")
}
// For bypassing
// Keep two more pipeline copies of in-flight store hits (s4, s5) so loads in
// s2 can be forwarded data that has not yet been written to the data array.
val s4_req = RegNext(s3_req)
val s4_valid = RegNext(s3_valid)
val s5_req = RegNext(s4_req)
val s5_valid = RegNext(s4_valid)
// A bypass applies when an s2 load addresses the same word as a younger
// in-flight store (comparison at word granularity).
val s3_bypass = widthMap(w => s3_valid && ((s2_req(w).addr >> wordOffBits) === (s3_req.addr >> wordOffBits)))
val s4_bypass = widthMap(w => s4_valid && ((s2_req(w).addr >> wordOffBits) === (s4_req.addr >> wordOffBits)))
val s5_bypass = widthMap(w => s5_valid && ((s2_req(w).addr >> wordOffBits) === (s5_req.addr >> wordOffBits)))
// Store -> Load bypassing
// Priority: youngest matching store wins (s3 over s4 over s5), otherwise the
// word read from the data array.
for (w <- 0 until lsuWidth) {
s2_data_word(w) := Mux(s3_bypass(w), s3_req.data,
Mux(s4_bypass(w), s4_req.data,
Mux(s5_bypass(w), s5_req.data,
s2_data_word_prebypass(w))))
}
// AMO ALU merges the store/AMO operand (rhs) into the current cache word (lhs).
val amoalu = Module(new AMOALU(xLen))
amoalu.io.mask := new StoreGen(s3_req.uop.mem_size, s3_req.addr, 0.U, xLen/8).mask
amoalu.io.cmd := s3_req.uop.mem_cmd
amoalu.io.lhs := s3_data_word
amoalu.io.rhs := RegNext(s2_req(0).data)
// Chisel last-connect: this overrides the data field of the bulk
// s3_req := RegNext(...) connection above with the merged AMO result.
s3_req.data := amoalu.io.out
val s3_way = RegNext(s2_tag_match_way(0))
// Store/AMO hits write the data array through write port 0; the word is
// replicated across the row and the write mask selects the addressed word.
dataWriteArb.io.in(0).valid := s3_valid
dataWriteArb.io.in(0).bits.addr := s3_req.addr
dataWriteArb.io.in(0).bits.wmask := UIntToOH(s3_req.addr.extract(rowOffBits-1,offsetlsb))
dataWriteArb.io.in(0).bits.data := Fill(rowWords, s3_req.data)
dataWriteArb.io.in(0).bits.way_en := s3_way
// The cache is "ordered" (fence-ready) when the MSHRs are drained and no
// request is in flight in s1 or s2.
io.lsu.ordered := mshrs.io.fence_rdy && !s1_valid.reduce(_||_) && !s2_valid.reduce(_||_)
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{
AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry,
IdRange, RegionType, TransferSizes
}
import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions}
import freechips.rocketchip.util.{
AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase,
CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct
}
import scala.math.max
// These transfer sizes describe requests issued from masters on the A channel that will be responded to by slaves on the D channel
/** Per-operation transfer-size claims for master-to-slave (A-channel) traffic.
  * Each field is the range of sizes a master may emit / a slave may support
  * for that opcode; `TransferSizes.none` means the opcode is absent. */
case class TLMasterToSlaveTransferSizes(
// Supports both Acquire+Release of the following two sizes:
acquireT: TransferSizes = TransferSizes.none,
acquireB: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none)
extends TLCommonTransferSizes {
// Field-wise intersection: sizes claimed by BOTH this and rhs.
def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .intersect(rhs.acquireT),
acquireB = acquireB .intersect(rhs.acquireB),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint))
// Field-wise minimum cover: smallest ranges containing both this and rhs.
def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .mincover(rhs.acquireT),
acquireB = acquireB .mincover(rhs.acquireB),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint))
// Reduce rendering to a simple yes/no per field
// e.g. "TBAGF" means acquireT/acquireB/arithmetic/get/putFull are present.
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(acquireT, "T"),
str(acquireB, "B"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
// Prints out the actual information in a user readable way
def infoString = {
s"""acquireT = ${acquireT}
|acquireB = ${acquireB}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLMasterToSlaveTransferSizes {
  /** Worst-case emit claim: every A-channel operation may be emitted at any
    * size from a single byte up to a full 4096-byte transfer. */
  def unknownEmits = {
    val anySize = TransferSizes(1, 4096)
    TLMasterToSlaveTransferSizes(
      acquireT   = anySize,
      acquireB   = anySize,
      arithmetic = anySize,
      logical    = anySize,
      get        = anySize,
      putFull    = anySize,
      putPartial = anySize,
      hint       = anySize)
  }
  /** Worst-case support claim: nothing is known to be supported. */
  def unknownSupports = TLMasterToSlaveTransferSizes()
}
// These transfer sizes describe requests issued from slaves on the B channel that will be responded to by masters on the C channel
/** Per-operation transfer-size claims for slave-to-master (B-channel) traffic.
  * Each field is the range of sizes a slave may emit / a master may support
  * for that opcode; `TransferSizes.none` means the opcode is absent. */
case class TLSlaveToMasterTransferSizes(
probe: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none
) extends TLCommonTransferSizes {
// Field-wise intersection: sizes claimed by BOTH this and rhs.
def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .intersect(rhs.probe),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint)
)
// Field-wise minimum cover: smallest ranges containing both this and rhs.
def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .mincover(rhs.probe),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint)
)
// Reduce rendering to a simple yes/no per field
// NOTE(review): probe and putPartial both render as "P", so the flag string
// is ambiguous between them — consider distinct letters upstream.
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(probe, "P"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
// Prints out the actual information in a user readable way
def infoString = {
s"""probe = ${probe}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLSlaveToMasterTransferSizes {
  /** Worst-case emit claim: every B-channel operation may be emitted at any
    * size from a single byte up to a full 4096-byte transfer. */
  def unknownEmits = {
    val anySize = TransferSizes(1, 4096)
    TLSlaveToMasterTransferSizes(
      probe      = anySize,
      arithmetic = anySize,
      logical    = anySize,
      get        = anySize,
      putFull    = anySize,
      putPartial = anySize,
      hint       = anySize)
  }
  /** Worst-case support claim: nothing is known to be supported. */
  def unknownSupports = TLSlaveToMasterTransferSizes()
}
/** The transfer-size fields shared by both [[TLMasterToSlaveTransferSizes]]
  * and [[TLSlaveToMasterTransferSizes]], i.e. the opcodes that exist in both
  * directions (everything except acquire and probe). */
trait TLCommonTransferSizes {
def arithmetic: TransferSizes
def logical: TransferSizes
def get: TransferSizes
def putFull: TransferSizes
def putPartial: TransferSizes
def hint: TransferSizes
}
/** Diplomatic description of one TileLink slave (manager): the address
  * ranges it decodes, the operations/sizes it supports and emits, and its
  * FIFO/deny/coherence properties. Construct via TLSlaveParameters.v1/v2;
  * the constructor is private so field evolution stays source-compatible. */
class TLSlaveParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
setName: Option[String],
val address: Seq[AddressSet],
val regionType: RegionType.T,
val executable: Boolean,
val fifoId: Option[Int],
val supports: TLMasterToSlaveTransferSizes,
val emits: TLSlaveToMasterTransferSizes,
// By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation)
val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleaseData
// If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order
// Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo
val mayDenyGet: Boolean, // applies to: AccessAckData, GrantData
val mayDenyPut: Boolean) // applies to: AccessAck, Grant, HintAck
// ReleaseAck may NEVER be denied
extends SimpleProduct
{
def sortedAddress = address.sorted
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlaveParameters]
override def productPrefix = "TLSlaveParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 11
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => address
case 2 => resources
case 3 => regionType
case 4 => executable
case 5 => fifoId
case 6 => supports
case 7 => emits
case 8 => alwaysGrantsT
case 9 => mayDenyGet
case 10 => mayDenyPut
case _ => throw new IndexOutOfBoundsException(n.toString)
}
// Convenience accessors for the individual supported-operation size ranges.
def supportsAcquireT: TransferSizes = supports.acquireT
def supportsAcquireB: TransferSizes = supports.acquireB
def supportsArithmetic: TransferSizes = supports.arithmetic
def supportsLogical: TransferSizes = supports.logical
def supportsGet: TransferSizes = supports.get
def supportsPutFull: TransferSizes = supports.putFull
def supportsPutPartial: TransferSizes = supports.putPartial
def supportsHint: TransferSizes = supports.hint
// Construction-time invariants: a well-formed slave has finite,
// non-overlapping addresses and a monotone hierarchy of capabilities.
require (!address.isEmpty, "Address cannot be empty")
address.foreach { a => require (a.finite, "Address must be finite") }
address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)")
require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)")
require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)")
require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)")
require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)")
require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)")
require (!alwaysGrantsT || supportsAcquireT, s"Must supportAcquireT if promising to always grantT")
// Make sure that the regionType agrees with the capabilities
require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached
require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire
require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet
// Display name: explicit setName, else the owning LazyModule's name.
val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected")
val maxTransfer = List( // Largest supported transfer of all types
supportsAcquireT.max,
supportsAcquireB.max,
supportsArithmetic.max,
supportsLogical.max,
supportsGet.max,
supportsPutFull.max,
supportsPutPartial.max).max
val maxAddress = address.map(_.max).max
val minAlignment = address.map(_.alignment).min
// The device had better not support a transfer larger than its alignment
require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)")
// Summarize this slave as a DTS resource (read/write/execute/cache/atomic).
def toResource: ResourceAddress = {
ResourceAddress(address, ResourcePermissions(
r = supportsAcquireB || supportsGet,
w = supportsAcquireT || supportsPutFull,
x = executable,
c = supportsAcquireB,
a = supportsArithmetic && supportsLogical))
}
// A node on the path with more than one input breaks the tree property
// (adapters and sinks are exempt from the check).
def findTreeViolation() = nodePath.find {
case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false
case _: SinkNode[_, _, _, _, _] => false
case node => node.inputs.size != 1
}
def isTree = findTreeViolation() == None
def infoString = {
s"""Slave Name = ${name}
|Slave Address = ${address}
|supports = ${supports.infoString}
|
|""".stripMargin
}
// v1-style copy: takes the flattened supports* fields and rebuilds the
// TLMasterToSlaveTransferSizes bundle; emits is carried over unchanged.
def v1copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
new TLSlaveParameters(
setName = setName,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = emits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
// v2-style copy: takes the bundled supports/emits directly and allows
// overriding the display name.
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: Option[String] = setName,
address: Seq[AddressSet] = address,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
fifoId: Option[Int] = fifoId,
supports: TLMasterToSlaveTransferSizes = supports,
emits: TLSlaveToMasterTransferSizes = emits,
alwaysGrantsT: Boolean = alwaysGrantsT,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
@deprecated("Use v1copy instead of copy","")
def copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
v1copy(
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supportsAcquireT = supportsAcquireT,
supportsAcquireB = supportsAcquireB,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
}
/** Factories for [[TLSlaveParameters]]. v1 takes flattened supports* fields
  * (emits defaults to the unknown worst case); v2 takes the bundled
  * supports/emits records and an optional explicit name. */
object TLSlaveParameters {
def v1(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
{
new TLSlaveParameters(
setName = None,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLSlaveToMasterTransferSizes.unknownEmits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2(
address: Seq[AddressSet],
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Seq(),
name: Option[String] = None,
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
fifoId: Option[Int] = None,
supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports,
emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits,
alwaysGrantsT: Boolean = false,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
}
/** Backwards-compatibility shim: the old TLManagerParameters factory simply
  * forwards to [[TLSlaveParameters.v1]] with identical arguments. */
object TLManagerParameters {
@deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","")
def apply(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
TLSlaveParameters.v1(
address,
resources,
regionType,
executable,
nodePath,
supportsAcquireT,
supportsAcquireB,
supportsArithmetic,
supportsLogical,
supportsGet,
supportsPutFull,
supportsPutPartial,
supportsHint,
mayDenyGet,
mayDenyPut,
alwaysGrantsT,
fifoId,
)
}
/** Per-channel data-bus widths, in bytes, for the TileLink channels that
  * carry data. `None` means the width of that channel is unknown/unspecified.
  *
  * @param a beat bytes of the A channel, if known
  * @param b beat bytes of the B channel, if known
  * @param c beat bytes of the C channel, if known
  * @param d beat bytes of the D channel, if known
  */
case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int])
{
  /** All four channel widths, in (a, b, c, d) order. */
  def members = Seq(a, b, c, d)
  // Construction-time validation: any specified width must be a power of two.
  // Use foreach rather than collect: we only want the side-effecting require,
  // not a discarded result collection.
  members.foreach { _.foreach { beatBytes =>
    require (isPow2(beatBytes), "Data channel width must be a power of 2")
  } }
}
object TLChannelBeatBytes {
  /** All four channels share the same known width of `beatBytes` bytes. */
  def apply(beatBytes: Int): TLChannelBeatBytes = {
    val width = Some(beatBytes)
    TLChannelBeatBytes(width, width, width, width)
  }
  /** Width unknown on every channel. */
  def apply(): TLChannelBeatBytes =
    TLChannelBeatBytes(None, None, None, None)
}
class TLSlavePortParameters private(
val slaves: Seq[TLSlaveParameters],
val channelBytes: TLChannelBeatBytes,
val endSinkId: Int,
val minLatency: Int,
val responseFields: Seq[BundleFieldBase],
val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
def sortedSlaves = slaves.sortBy(_.sortedAddress.head)
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters]
override def productPrefix = "TLSlavePortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => slaves
case 1 => channelBytes
case 2 => endSinkId
case 3 => minLatency
case 4 => responseFields
case 5 => requestKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!slaves.isEmpty, "Slave ports must have slaves")
require (endSinkId >= 0, "Sink ids cannot be negative")
require (minLatency >= 0, "Minimum required latency cannot be negative")
// Using this API implies you cannot handle mixed-width busses
def beatBytes = {
channelBytes.members.foreach { width =>
require (width.isDefined && width == channelBytes.a)
}
channelBytes.a.get
}
// TODO this should be deprecated
def managers = slaves
def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = {
val relevant = slaves.filter(m => policy(m))
relevant.foreach { m =>
require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ")
}
}
// Bounds on required sizes
def maxAddress = slaves.map(_.maxAddress).max
def maxTransfer = slaves.map(_.maxTransfer).max
def mayDenyGet = slaves.exists(_.mayDenyGet)
def mayDenyPut = slaves.exists(_.mayDenyPut)
// Diplomatically determined operation sizes emitted by all outward Slaves
// as opposed to emits* which generate circuitry to check which specific addresses
val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _)
// Operation Emitted by at least one outward Slaves
// as opposed to emits* which generate circuitry to check which specific addresses
val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _)
// Diplomatically determined operation sizes supported by all outward Slaves
// as opposed to supports* which generate circuitry to check which specific addresses
val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _)
val allSupportAcquireT = allSupportClaims.acquireT
val allSupportAcquireB = allSupportClaims.acquireB
val allSupportArithmetic = allSupportClaims.arithmetic
val allSupportLogical = allSupportClaims.logical
val allSupportGet = allSupportClaims.get
val allSupportPutFull = allSupportClaims.putFull
val allSupportPutPartial = allSupportClaims.putPartial
val allSupportHint = allSupportClaims.hint
// Operation supported by at least one outward Slaves
// as opposed to supports* which generate circuitry to check which specific addresses
val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _)
val anySupportAcquireT = !anySupportClaims.acquireT.none
val anySupportAcquireB = !anySupportClaims.acquireB.none
val anySupportArithmetic = !anySupportClaims.arithmetic.none
val anySupportLogical = !anySupportClaims.logical.none
val anySupportGet = !anySupportClaims.get.none
val anySupportPutFull = !anySupportClaims.putFull.none
val anySupportPutPartial = !anySupportClaims.putPartial.none
val anySupportHint = !anySupportClaims.hint.none
// Supporting Acquire means being routable for GrantAck
require ((endSinkId == 0) == !anySupportAcquireB)
// These return Option[TLSlaveParameters] for your convenience
def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address)))
// The safe version will check the entire address
def findSafe(address: UInt) = VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _)))
// The fast version assumes the address is valid (you probably want fastProperty instead of this function)
def findFast(address: UInt) = {
val routingMask = AddressDecoder(slaves.map(_.address))
VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _)))
}
// Compute the simplest AddressSets that decide a key
def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = {
val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) =>
k -> vs.flatMap(_._2)
}
val reductionMask = AddressDecoder(groups.map(_._2))
groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) }
}
// Select a property
def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D =
Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) })
// Note: returns the actual fifoId + 1 or 0 if None
def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U)
def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B)
// Does this Port manage this ID/address?
def containsSafe(address: UInt) = findSafe(address).reduce(_ || _)
private def addressHelper(
// setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity
safe: Boolean,
// member filters out the sizes being checked based on the opcode being emitted or supported
member: TLSlaveParameters => TransferSizes,
address: UInt,
lgSize: UInt,
// range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity
range: Option[TransferSizes]): Bool = {
// trim reduces circuit complexity by intersecting checked sizes with the range argument
def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x)
// groupBy returns an unordered map, convert back to Seq and sort the result for determinism
// groupByIntoSeq is turning slaves into trimmed membership sizes
// We are grouping all the slaves by their transfer size where
// if they support the trimmed size then
// member is the type of transfer that you are looking for (What you are trying to filter on)
// When you consider membership, you are trimming the sizes to only the ones that you care about
// you are filtering the slaves based on both whether they support a particular opcode and the size
// Grouping the slaves based on the actual transfer size range they support
// intersecting the range and checking their membership
// FOR SUPPORTCASES instead of returning the list of slaves,
// you are returning a map from transfer size to the set of
// address sets that are supported for that transfer size
// find all the slaves that support a certain type of operation and then group their addresses by the supported size
// for every size there could be multiple address ranges
// safety is a trade off between checking between all possible addresses vs only the addresses
// that are known to have supported sizes
// the trade off is 'checking all addresses is a more expensive circuit but will always give you
// the right answer even if you give it an illegal address'
// the not safe version is a cheaper circuit but if you give it an illegal address then it might produce the wrong answer
// fast presumes address legality
// This groupByIntoSeq deterministically groups all address sets for which a given `member` transfer size applies.
// In the resulting Map of cases, the keys are transfer sizes and the values are all address sets which emit or support that size.
val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) =>
k -> vs.flatMap(_.address)
}
// safe produces a circuit that compares against all possible addresses,
// whereas fast presumes that the address is legal but uses an efficient address decoder
val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2))
// Simplified creates the most concise possible representation of each cases' address sets based on the mask.
val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) }
simplified.map { case (s, a) =>
// s is a size, you are checking for this size either the size of the operation is in s
// We return an or-reduction of all the cases, checking whether any contains both the dynamic size and dynamic address on the wire.
((Some(s) == range).B || s.containsLg(lgSize)) &&
a.map(_.contains(address)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
// Synthesizable per-operation legality checks, all delegating to addressHelper:
//  * "Safe" variants (safe = true) compare the dynamic address against every
//    slave address set, so they are correct even for illegal addresses.
//  * "Fast" variants (safe = false) presume the address is legal and use the
//    minimized AddressDecoder mask, producing cheaper hardware.
// Each returns a Bool that is true when some slave supports the given
// operation at (address, lgSize), optionally restricted to a static `range`.
def supportsAcquireTSafe   (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBSafe   (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalSafe    (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range)
def supportsGetSafe        (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range)
def supportsPutFullSafe    (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range)
def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range)
def supportsHintSafe       (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range)
def supportsAcquireTFast   (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBFast   (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalFast    (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range)
def supportsGetFast        (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range)
def supportsPutFullFast    (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range)
def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putPartial, address, lgSize, range)
def supportsHintFast       (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range)

// Same helper, but over the operation sizes the slaves *emit* rather than
// the ones they support.  Only "Safe" variants are provided here.
def emitsProbeSafe         (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range)
def emitsArithmeticSafe    (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range)
def emitsLogicalSafe       (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range)
def emitsGetSafe           (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range)
def emitsPutFullSafe       (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range)
def emitsPutPartialSafe    (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range)
def emitsHintSafe          (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range)

// First FIFO-domain violation found among the slaves, if any.
def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption
// True when every slave forms a legal tree (no violations anywhere).
def isTree = !slaves.exists(!_.isTree)

// Human-readable dump of this port and all of its slaves, for debug output.
def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString
/** Copy constructor using the legacy v1 (manager/beatBytes) vocabulary.
  * Fields not overridden keep this instance's values.
  */
def v1copy(
  managers:       Seq[TLSlaveParameters] = slaves,
  beatBytes:      Int = -1,
  endSinkId:      Int = endSinkId,
  minLatency:     Int = minLatency,
  responseFields: Seq[BundleFieldBase] = responseFields,
  requestKeys:    Seq[BundleKeyBase]   = requestKeys) =
{
  new TLSlavePortParameters(
    slaves       = managers,
    // beatBytes == -1 is the sentinel for "keep the existing channelBytes"
    channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes,
    endSinkId    = endSinkId,
    minLatency   = minLatency,
    responseFields = responseFields,
    requestKeys    = requestKeys)
}

/** Copy constructor using the v2 (slave/channelBytes) vocabulary. */
def v2copy(
  slaves:         Seq[TLSlaveParameters] = slaves,
  channelBytes:   TLChannelBeatBytes     = channelBytes,
  endSinkId:      Int = endSinkId,
  minLatency:     Int = minLatency,
  responseFields: Seq[BundleFieldBase] = responseFields,
  requestKeys:    Seq[BundleKeyBase]   = requestKeys) =
{
  new TLSlavePortParameters(
    slaves         = slaves,
    channelBytes   = channelBytes,
    endSinkId      = endSinkId,
    minLatency     = minLatency,
    responseFields = responseFields,
    requestKeys    = requestKeys)
}

@deprecated("Use v1copy instead of copy","")
def copy(
  managers:       Seq[TLSlaveParameters] = slaves,
  beatBytes:      Int = -1,
  endSinkId:      Int = endSinkId,
  minLatency:     Int = minLatency,
  responseFields: Seq[BundleFieldBase] = responseFields,
  requestKeys:    Seq[BundleKeyBase]   = requestKeys) =
{
  // Pure forwarding shim kept for source compatibility.
  v1copy(
    managers,
    beatBytes,
    endSinkId,
    minLatency,
    responseFields,
    requestKeys)
}
}
/** Factory for [[TLSlavePortParameters]] using the v1 (manager/beatBytes) vocabulary. */
object TLSlavePortParameters {
  def v1(
    managers:       Seq[TLSlaveParameters],
    beatBytes:      Int,
    endSinkId:      Int = 0,
    minLatency:     Int = 0,
    responseFields: Seq[BundleFieldBase] = Nil,
    requestKeys:    Seq[BundleKeyBase]   = Nil) =
  {
    new TLSlavePortParameters(
      slaves       = managers,
      // v1 callers give a single beatBytes shared by every channel.
      channelBytes = TLChannelBeatBytes(beatBytes),
      endSinkId    = endSinkId,
      minLatency   = minLatency,
      responseFields = responseFields,
      requestKeys    = requestKeys)
  }
}
/** Deprecated alias for [[TLSlavePortParameters]].v1; kept for source compatibility. */
object TLManagerPortParameters {
  @deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","")
  def apply(
    managers:       Seq[TLSlaveParameters],
    beatBytes:      Int,
    endSinkId:      Int = 0,
    minLatency:     Int = 0,
    responseFields: Seq[BundleFieldBase] = Nil,
    requestKeys:    Seq[BundleKeyBase]   = Nil) =
  {
    // Pure forwarding shim.
    TLSlavePortParameters.v1(
      managers,
      beatBytes,
      endSinkId,
      minLatency,
      responseFields,
      requestKeys)
  }
}
/** Diplomatic description of one TileLink master (client).
  *
  * Instances are created via the companion's `v1`/`v2` factories (the
  * constructor is private).  Equality and formatting come from
  * `SimpleProduct` over the elements listed in `productElement`.
  */
class TLMasterParameters private(
  val nodePath:          Seq[BaseNode],
  val resources:         Seq[Resource],
  val name:              String,
  val visibility:        Seq[AddressSet],
  val unusedRegionTypes: Set[RegionType.T],
  val executesOnly:      Boolean,
  val requestFifo:       Boolean, // only a request, not a requirement. applies to A, not C.
  val supports:          TLSlaveToMasterTransferSizes,
  val emits:             TLMasterToSlaveTransferSizes,
  val neverReleasesData: Boolean,
  val sourceId:          IdRange) extends SimpleProduct
{
  override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters]
  override def productPrefix = "TLMasterParameters"
  // We intentionally omit nodePath for equality testing / formatting
  def productArity: Int = 10
  def productElement(n: Int): Any = n match {
    case 0 => name
    case 1 => sourceId
    case 2 => resources
    case 3 => visibility
    case 4 => unusedRegionTypes
    case 5 => executesOnly
    case 6 => requestFifo
    case 7 => supports
    case 8 => emits
    case 9 => neverReleasesData
    case _ => throw new IndexOutOfBoundsException(n.toString)
  }

  // Basic sanity: a master must own at least one source id and see some address space.
  require (!sourceId.isEmpty)
  require (!visibility.isEmpty)
  // Any PutPartial size must also be expressible as PutFull.
  require (supports.putFull.contains(supports.putPartial))
  // We only support these operations if we support Probe (ie: we're a cache)
  require (supports.probe.contains(supports.arithmetic))
  require (supports.probe.contains(supports.logical))
  require (supports.probe.contains(supports.get))
  require (supports.probe.contains(supports.putFull))
  require (supports.probe.contains(supports.putPartial))
  require (supports.probe.contains(supports.hint))

  // The visible address sets must be pairwise disjoint.
  visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }

  // Largest transfer size this master can receive on its B/D channels.
  val maxTransfer = List(
    supports.probe.max,
    supports.arithmetic.max,
    supports.logical.max,
    supports.get.max,
    supports.putFull.max,
    supports.putPartial.max).max

  /** Human-readable summary for debug dumps. */
  def infoString = {
    s"""Master Name = ${name}
       |visibility = ${visibility}
       |emits = ${emits.infoString}
       |sourceId = ${sourceId}
       |
       |""".stripMargin
  }

  /** Copy constructor using the v1 (flat supports*) vocabulary. */
  def v1copy(
    name:               String = name,
    sourceId:           IdRange = sourceId,
    nodePath:           Seq[BaseNode] = nodePath,
    requestFifo:        Boolean = requestFifo,
    visibility:         Seq[AddressSet] = visibility,
    supportsProbe:      TransferSizes = supports.probe,
    supportsArithmetic: TransferSizes = supports.arithmetic,
    supportsLogical:    TransferSizes = supports.logical,
    supportsGet:        TransferSizes = supports.get,
    supportsPutFull:    TransferSizes = supports.putFull,
    supportsPutPartial: TransferSizes = supports.putPartial,
    supportsHint:       TransferSizes = supports.hint) =
  {
    new TLMasterParameters(
      nodePath          = nodePath,
      resources         = this.resources,
      name              = name,
      visibility        = visibility,
      unusedRegionTypes = this.unusedRegionTypes,
      executesOnly      = this.executesOnly,
      requestFifo       = requestFifo,
      supports          = TLSlaveToMasterTransferSizes(
        probe      = supportsProbe,
        arithmetic = supportsArithmetic,
        logical    = supportsLogical,
        get        = supportsGet,
        putFull    = supportsPutFull,
        putPartial = supportsPutPartial,
        hint       = supportsHint),
      emits             = this.emits,
      neverReleasesData = this.neverReleasesData,
      sourceId          = sourceId)
  }

  /** Copy constructor exposing every v2 field. */
  def v2copy(
    nodePath:          Seq[BaseNode] = nodePath,
    resources:         Seq[Resource] = resources,
    name:              String = name,
    visibility:        Seq[AddressSet] = visibility,
    unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes,
    executesOnly:      Boolean = executesOnly,
    requestFifo:       Boolean = requestFifo,
    supports:          TLSlaveToMasterTransferSizes = supports,
    emits:             TLMasterToSlaveTransferSizes = emits,
    neverReleasesData: Boolean = neverReleasesData,
    sourceId:          IdRange = sourceId) =
  {
    new TLMasterParameters(
      nodePath          = nodePath,
      resources         = resources,
      name              = name,
      visibility        = visibility,
      unusedRegionTypes = unusedRegionTypes,
      executesOnly      = executesOnly,
      requestFifo       = requestFifo,
      supports          = supports,
      emits             = emits,
      neverReleasesData = neverReleasesData,
      sourceId          = sourceId)
  }

  @deprecated("Use v1copy instead of copy","")
  def copy(
    name:               String = name,
    sourceId:           IdRange = sourceId,
    nodePath:           Seq[BaseNode] = nodePath,
    requestFifo:        Boolean = requestFifo,
    visibility:         Seq[AddressSet] = visibility,
    supportsProbe:      TransferSizes = supports.probe,
    supportsArithmetic: TransferSizes = supports.arithmetic,
    supportsLogical:    TransferSizes = supports.logical,
    supportsGet:        TransferSizes = supports.get,
    supportsPutFull:    TransferSizes = supports.putFull,
    supportsPutPartial: TransferSizes = supports.putPartial,
    supportsHint:       TransferSizes = supports.hint) =
  {
    // Pure forwarding shim kept for source compatibility.
    v1copy(
      name = name,
      sourceId = sourceId,
      nodePath = nodePath,
      requestFifo = requestFifo,
      visibility = visibility,
      supportsProbe = supportsProbe,
      supportsArithmetic = supportsArithmetic,
      supportsLogical = supportsLogical,
      supportsGet = supportsGet,
      supportsPutFull = supportsPutFull,
      supportsPutPartial = supportsPutPartial,
      supportsHint = supportsHint)
  }
}
/** Factories for [[TLMasterParameters]]; the class constructor is private. */
object TLMasterParameters {
  /** v1 factory: flat supports* arguments; emits/resources/region info take defaults. */
  def v1(
    name:               String,
    sourceId:           IdRange = IdRange(0,1),
    nodePath:           Seq[BaseNode] = Seq(),
    requestFifo:        Boolean = false,
    visibility:         Seq[AddressSet] = Seq(AddressSet(0, ~0)),
    supportsProbe:      TransferSizes = TransferSizes.none,
    supportsArithmetic: TransferSizes = TransferSizes.none,
    supportsLogical:    TransferSizes = TransferSizes.none,
    supportsGet:        TransferSizes = TransferSizes.none,
    supportsPutFull:    TransferSizes = TransferSizes.none,
    supportsPutPartial: TransferSizes = TransferSizes.none,
    supportsHint:       TransferSizes = TransferSizes.none) =
  {
    new TLMasterParameters(
      nodePath          = nodePath,
      resources         = Nil,
      name              = name,
      visibility        = visibility,
      unusedRegionTypes = Set(),
      executesOnly      = false,
      requestFifo       = requestFifo,
      supports          = TLSlaveToMasterTransferSizes(
        probe      = supportsProbe,
        arithmetic = supportsArithmetic,
        logical    = supportsLogical,
        get        = supportsGet,
        putFull    = supportsPutFull,
        putPartial = supportsPutPartial,
        hint       = supportsHint),
      // v1 callers cannot state what they emit, so claim the unknown default.
      emits             = TLMasterToSlaveTransferSizes.unknownEmits,
      neverReleasesData = false,
      sourceId          = sourceId)
  }

  /** v2 factory: every constructor field is exposed directly. */
  def v2(
    nodePath:          Seq[BaseNode] = Seq(),
    resources:         Seq[Resource] = Nil,
    name:              String,
    visibility:        Seq[AddressSet] = Seq(AddressSet(0, ~0)),
    unusedRegionTypes: Set[RegionType.T] = Set(),
    executesOnly:      Boolean = false,
    requestFifo:       Boolean = false,
    supports:          TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports,
    emits:             TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits,
    neverReleasesData: Boolean = false,
    sourceId:          IdRange = IdRange(0,1)) =
  {
    new TLMasterParameters(
      nodePath          = nodePath,
      resources         = resources,
      name              = name,
      visibility        = visibility,
      unusedRegionTypes = unusedRegionTypes,
      executesOnly      = executesOnly,
      requestFifo       = requestFifo,
      supports          = supports,
      emits             = emits,
      neverReleasesData = neverReleasesData,
      sourceId          = sourceId)
  }
}
/** Deprecated alias for [[TLMasterParameters]].v1; kept for source compatibility. */
object TLClientParameters {
  @deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","")
  def apply(
    name:               String,
    sourceId:           IdRange = IdRange(0,1),
    nodePath:           Seq[BaseNode] = Seq(),
    requestFifo:        Boolean = false,
    visibility:         Seq[AddressSet] = Seq(AddressSet.everything),
    supportsProbe:      TransferSizes = TransferSizes.none,
    supportsArithmetic: TransferSizes = TransferSizes.none,
    supportsLogical:    TransferSizes = TransferSizes.none,
    supportsGet:        TransferSizes = TransferSizes.none,
    supportsPutFull:    TransferSizes = TransferSizes.none,
    supportsPutPartial: TransferSizes = TransferSizes.none,
    supportsHint:       TransferSizes = TransferSizes.none) =
  {
    // Pure forwarding shim.
    TLMasterParameters.v1(
      name = name,
      sourceId = sourceId,
      nodePath = nodePath,
      requestFifo = requestFifo,
      visibility = visibility,
      supportsProbe = supportsProbe,
      supportsArithmetic = supportsArithmetic,
      supportsLogical = supportsLogical,
      supportsGet = supportsGet,
      supportsPutFull = supportsPutFull,
      supportsPutPartial = supportsPutPartial,
      supportsHint = supportsHint)
  }
}
/** Diplomatic description of one TileLink master port: the set of masters
  * sharing the port plus port-wide metadata (latency, user-bit fields).
  * Construct via the companion's `v1`/`v2` factories.
  */
class TLMasterPortParameters private(
  val masters:       Seq[TLMasterParameters],
  val channelBytes:  TLChannelBeatBytes,
  val minLatency:    Int,
  val echoFields:    Seq[BundleFieldBase],
  val requestFields: Seq[BundleFieldBase],
  val responseKeys:  Seq[BundleKeyBase]) extends SimpleProduct
{
  override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters]
  override def productPrefix = "TLMasterPortParameters"
  def productArity: Int = 6
  def productElement(n: Int): Any = n match {
    case 0 => masters
    case 1 => channelBytes
    case 2 => minLatency
    case 3 => echoFields
    case 4 => requestFields
    case 5 => responseKeys
    case _ => throw new IndexOutOfBoundsException(n.toString)
  }

  require (!masters.isEmpty)
  require (minLatency >= 0)
  // legacy name:
  def clients = masters

  // Require disjoint ranges for Ids
  IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) =>
    require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}")
  }

  // Bounds on required sizes
  def endSourceId = masters.map(_.sourceId.end).max
  def maxTransfer = masters.map(_.maxTransfer).max

  // The unused sources < endSourceId
  def unusedSources: Seq[Int] = {
    val usedSources = masters.map(_.sourceId).sortBy(_.start)
    // Gaps between the end of one used range and the start of the next.
    ((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) =>
      end until start
    }
  }

  // Diplomatically determined operation sizes emitted by all inward Masters
  // as opposed to emits* which generate circuitry to check which specific addresses
  val allEmitClaims = masters.map(_.emits).reduce( _ intersect _)

  // Diplomatically determined operation sizes Emitted by at least one inward Masters
  // as opposed to emits* which generate circuitry to check which specific addresses
  val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _)

  // Diplomatically determined operation sizes supported by all inward Masters
  // as opposed to supports* which generate circuitry to check which specific addresses
  val allSupportProbe      = masters.map(_.supports.probe)     .reduce(_ intersect _)
  val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _)
  val allSupportLogical    = masters.map(_.supports.logical)   .reduce(_ intersect _)
  val allSupportGet        = masters.map(_.supports.get)       .reduce(_ intersect _)
  val allSupportPutFull    = masters.map(_.supports.putFull)   .reduce(_ intersect _)
  val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _)
  val allSupportHint       = masters.map(_.supports.hint)      .reduce(_ intersect _)

  // Diplomatically determined operation sizes supported by at least one master
  // as opposed to supports* which generate circuitry to check which specific addresses
  val anySupportProbe      = masters.map(!_.supports.probe.none)     .reduce(_ || _)
  val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _)
  val anySupportLogical    = masters.map(!_.supports.logical.none)   .reduce(_ || _)
  val anySupportGet        = masters.map(!_.supports.get.none)       .reduce(_ || _)
  val anySupportPutFull    = masters.map(!_.supports.putFull.none)   .reduce(_ || _)
  val anySupportPutPartial = masters.map(!_.supports.putPartial.none).reduce(_ || _)
  val anySupportHint       = masters.map(!_.supports.hint.none)      .reduce(_ || _)

  // These return Option[TLMasterParameters] for your convenience
  def find(id: Int) = masters.find(_.sourceId.contains(id))

  // Synthesizable lookup methods: one-hot vector of "which master owns id".
  def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id)))
  def contains(id: UInt) = find(id).reduce(_ || _)
  def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B))

  // Available during RTL runtime, checks to see if (id, size) is supported by the master's (client's) diplomatic parameters
  private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = {
    val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _)
    // this if statement is a coarse generalization of the groupBy in the sourceIdHelper2 version;
    // the case where there is only one group.
    if (allSame) member(masters(0)).containsLg(lgSize) else {
      // Find the master associated with ID and returns whether that particular master is able to receive transaction of lgSize
      Mux1H(find(id), masters.map(member(_).containsLg(lgSize)))
    }
  }

  // Check for support of a given operation at a specific id
  val supportsProbe      = sourceIdHelper(_.supports.probe)      _
  val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _
  val supportsLogical    = sourceIdHelper(_.supports.logical)    _
  val supportsGet        = sourceIdHelper(_.supports.get)        _
  val supportsPutFull    = sourceIdHelper(_.supports.putFull)    _
  val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _
  val supportsHint       = sourceIdHelper(_.supports.hint)       _

  // TODO: Merge sourceIdHelper2 with sourceIdHelper
  private def sourceIdHelper2(
    member:   TLMasterParameters => TransferSizes,
    sourceId: UInt,
    lgSize:   UInt): Bool = {
    // Because sourceIds are uniquely owned by each master, we use them to group the
    // cases that have to be checked.
    val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) =>
      k -> vs.map(_.sourceId)
    }
    // Or-reduce: some group both contains lgSize and owns sourceId.
    emitCases.map { case (s, a) =>
      (s.containsLg(lgSize)) &&
      a.map(_.contains(sourceId)).reduce(_||_)
    }.foldLeft(false.B)(_||_)
  }

  // Check for emit of a given operation at a specific id
  def emitsAcquireT  (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize)
  def emitsAcquireB  (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize)
  def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize)
  def emitsLogical   (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize)
  def emitsGet       (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize)
  def emitsPutFull   (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize)
  def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize)
  def emitsHint      (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize)

  /** Human-readable summary of every master on this port. */
  def infoString = masters.map(_.infoString).mkString

  /** Copy constructor using the v1 (client) vocabulary; channelBytes is preserved. */
  def v1copy(
    clients:       Seq[TLMasterParameters] = masters,
    minLatency:    Int = minLatency,
    echoFields:    Seq[BundleFieldBase] = echoFields,
    requestFields: Seq[BundleFieldBase] = requestFields,
    responseKeys:  Seq[BundleKeyBase]   = responseKeys) =
  {
    new TLMasterPortParameters(
      masters       = clients,
      channelBytes  = channelBytes,
      minLatency    = minLatency,
      echoFields    = echoFields,
      requestFields = requestFields,
      responseKeys  = responseKeys)
  }

  /** Copy constructor exposing every v2 field. */
  def v2copy(
    masters:       Seq[TLMasterParameters] = masters,
    channelBytes:  TLChannelBeatBytes = channelBytes,
    minLatency:    Int = minLatency,
    echoFields:    Seq[BundleFieldBase] = echoFields,
    requestFields: Seq[BundleFieldBase] = requestFields,
    responseKeys:  Seq[BundleKeyBase]   = responseKeys) =
  {
    new TLMasterPortParameters(
      masters       = masters,
      channelBytes  = channelBytes,
      minLatency    = minLatency,
      echoFields    = echoFields,
      requestFields = requestFields,
      responseKeys  = responseKeys)
  }

  @deprecated("Use v1copy instead of copy","")
  def copy(
    clients:       Seq[TLMasterParameters] = masters,
    minLatency:    Int = minLatency,
    echoFields:    Seq[BundleFieldBase] = echoFields,
    requestFields: Seq[BundleFieldBase] = requestFields,
    responseKeys:  Seq[BundleKeyBase]   = responseKeys) =
  {
    // Pure forwarding shim kept for source compatibility.
    v1copy(
      clients,
      minLatency,
      echoFields,
      requestFields,
      responseKeys)
  }
}
/** Deprecated alias for [[TLMasterPortParameters]].v1; kept for source compatibility. */
object TLClientPortParameters {
  @deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","")
  def apply(
    clients:       Seq[TLMasterParameters],
    minLatency:    Int = 0,
    echoFields:    Seq[BundleFieldBase] = Nil,
    requestFields: Seq[BundleFieldBase] = Nil,
    responseKeys:  Seq[BundleKeyBase]   = Nil) =
  {
    // Pure forwarding shim.
    TLMasterPortParameters.v1(
      clients,
      minLatency,
      echoFields,
      requestFields,
      responseKeys)
  }
}
/** Factories for [[TLMasterPortParameters]]; the class constructor is private. */
object TLMasterPortParameters {
  /** v1 factory: channelBytes takes the unknown default. */
  def v1(
    clients:       Seq[TLMasterParameters],
    minLatency:    Int = 0,
    echoFields:    Seq[BundleFieldBase] = Nil,
    requestFields: Seq[BundleFieldBase] = Nil,
    responseKeys:  Seq[BundleKeyBase]   = Nil) =
  {
    new TLMasterPortParameters(
      masters       = clients,
      channelBytes  = TLChannelBeatBytes(),
      minLatency    = minLatency,
      echoFields    = echoFields,
      requestFields = requestFields,
      responseKeys  = responseKeys)
  }

  /** v2 factory: every constructor field is exposed directly. */
  def v2(
    masters:       Seq[TLMasterParameters],
    channelBytes:  TLChannelBeatBytes = TLChannelBeatBytes(),
    minLatency:    Int = 0,
    echoFields:    Seq[BundleFieldBase] = Nil,
    requestFields: Seq[BundleFieldBase] = Nil,
    responseKeys:  Seq[BundleKeyBase]   = Nil) =
  {
    new TLMasterPortParameters(
      masters       = masters,
      channelBytes  = channelBytes,
      minLatency    = minLatency,
      echoFields    = echoFields,
      requestFields = requestFields,
      responseKeys  = responseKeys)
  }
}
/** Width parameters for the physical TileLink wires of one link.
  *
  * @param hasBCE whether the B, C and E channels exist (true only when both
  *               sides can take part in Acquire/Probe coherence traffic).
  */
case class TLBundleParameters(
  addressBits: Int,
  dataBits:    Int,
  sourceBits:  Int,
  sinkBits:    Int,
  sizeBits:    Int,
  echoFields:     Seq[BundleFieldBase],
  requestFields:  Seq[BundleFieldBase],
  responseFields: Seq[BundleFieldBase],
  hasBCE: Boolean)
{
  // Chisel has issues with 0-width wires
  require (addressBits >= 1)
  require (dataBits    >= 8)
  require (sourceBits  >= 1)
  require (sinkBits    >= 1)
  require (sizeBits    >= 1)
  require (isPow2(dataBits))
  // Echo fields travel out and back unchanged, so they must be control-flow fields.
  echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") }

  // Number of address bits that select a byte within one beat.
  val addrLoBits = log2Up(dataBits/8)

  // Used to uniquify bus IP names
  def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u")

  /** Widest-of-each-field merge of two bundle parameterizations. */
  def union(x: TLBundleParameters) =
    TLBundleParameters(
      max(addressBits, x.addressBits),
      max(dataBits,    x.dataBits),
      max(sourceBits,  x.sourceBits),
      max(sinkBits,    x.sinkBits),
      max(sizeBits,    x.sizeBits),
      echoFields     = BundleField.union(echoFields     ++ x.echoFields),
      requestFields  = BundleField.union(requestFields  ++ x.requestFields),
      responseFields = BundleField.union(responseFields ++ x.responseFields),
      hasBCE || x.hasBCE)
}
object TLBundleParameters
{
  // Identity element for `union`: the minimum legal widths, no fields, no BCE.
  val emptyBundleParams = TLBundleParameters(
    addressBits = 1,
    dataBits    = 8,
    sourceBits  = 1,
    sinkBits    = 1,
    sizeBits    = 1,
    echoFields     = Nil,
    requestFields  = Nil,
    responseFields = Nil,
    hasBCE = false)

  /** Merge a whole sequence of parameterizations into one wide-enough bundle. */
  def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y))

  /** Derive the wire widths needed to connect a master port to a slave port. */
  def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) =
    new TLBundleParameters(
      addressBits = log2Up(slave.maxAddress + 1),
      dataBits    = slave.beatBytes * 8,
      sourceBits  = log2Up(master.endSourceId),
      sinkBits    = log2Up(slave.endSinkId),
      // Enough bits to encode lg(maxTransfer) of either side.
      sizeBits    = log2Up(log2Ceil(max(master.maxTransfer, slave.maxTransfer))+1),
      echoFields     = master.echoFields,
      requestFields  = BundleField.accept(master.requestFields, slave.requestKeys),
      responseFields = BundleField.accept(slave.responseFields, master.responseKeys),
      // B/C/E channels exist only when coherence is possible on this link.
      hasBCE = master.anySupportProbe && slave.anySupportAcquireB)
}
/** Everything diplomacy knows about one master-port-to-slave-port connection. */
case class TLEdgeParameters(
  master: TLMasterPortParameters,
  slave:  TLSlavePortParameters,
  params: Parameters,
  sourceInfo: SourceInfo) extends FormatEdge
{
  // legacy names:
  def manager = slave
  def client = master

  val maxTransfer = max(master.maxTransfer, slave.maxTransfer)
  val maxLgSize = log2Ceil(maxTransfer)

  // Sanity check the link...
  require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})")

  // Operation sizes both emitted by some master and supported by some slave.
  def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims)

  // Physical wire widths for this edge.
  val bundle = TLBundleParameters(master, slave)
  def formatEdge = master.infoString + "\n" + slave.infoString
}
/** Per-channel credited-flow-control delays for the five TileLink channels. */
case class TLCreditedDelay(
  a: CreditedDelay,
  b: CreditedDelay,
  c: CreditedDelay,
  d: CreditedDelay,
  e: CreditedDelay)
{
  /** Channel-wise sum of two delay sets (e.g. the two ends of a link). */
  def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay(
    a = a + that.a,
    b = b + that.b,
    c = c + that.c,
    d = d + that.d,
    e = e + that.e)

  override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})"
}
object TLCreditedDelay {
  /** Lift a single link delay to all five channels: the request channels
    * (A, C, E) use it forward-facing, the response channels (B, D) flipped.
    */
  def apply(delay: CreditedDelay): TLCreditedDelay = {
    val flipped = delay.flip
    apply(delay, flipped, delay, flipped, delay)
  }
}
// Credited-crossing wrappers: pair the ordinary port parameters with the
// credit delays of each end of the link.
case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString}

case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
  // Total delay across the crossing is the sum of both ends' delays.
  val delay = client.delay + manager.delay
  val bundle = TLBundleParameters(client.base, manager.base)
  def formatEdge = client.infoString + "\n" + manager.infoString
}
// Asynchronous-crossing wrappers: the manager side carries the async queue
// configuration that sizes the crossing.
case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters)

case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
  val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base))
  def formatEdge = client.infoString + "\n" + manager.infoString
}
// Rational-crossing wrappers: the manager side records the clock-ratio direction.
case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}

case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
  val bundle = TLBundleParameters(client.base, manager.base)
  def formatEdge = client.infoString + "\n" + manager.infoString
}
// To be unified, devices must agree on all of these terms
// (everything except the address sets and fifoId, which are merged).
case class ManagerUnificationKey(
  resources:          Seq[Resource],
  regionType:         RegionType.T,
  executable:         Boolean,
  supportsAcquireT:   TransferSizes,
  supportsAcquireB:   TransferSizes,
  supportsArithmetic: TransferSizes,
  supportsLogical:    TransferSizes,
  supportsGet:        TransferSizes,
  supportsPutFull:    TransferSizes,
  supportsPutPartial: TransferSizes,
  supportsHint:       TransferSizes)
object ManagerUnificationKey
{
  /** Project a slave's parameters onto the terms that must match for unification. */
  def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey(
    resources          = x.resources,
    regionType         = x.regionType,
    executable         = x.executable,
    supportsAcquireT   = x.supportsAcquireT,
    supportsAcquireB   = x.supportsAcquireB,
    supportsArithmetic = x.supportsArithmetic,
    supportsLogical    = x.supportsLogical,
    supportsGet        = x.supportsGet,
    supportsPutFull    = x.supportsPutFull,
    supportsPutPartial = x.supportsPutPartial,
    supportsHint       = x.supportsHint)
}
object ManagerUnification
{
  /** Collapse slaves that agree on every [[ManagerUnificationKey]] term into a
    * single slave whose address is the union of the group's addresses.  The
    * fifoId survives only if the whole group already agreed on it.
    */
  def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = {
    val groups = slaves.groupBy(ManagerUnificationKey.apply).values
    groups.iterator.map { group =>
      val representative = group.head
      val fifoIdsAgree   = group.forall(_.fifoId == representative.fifoId)
      val mergedAddress  = AddressSet.unify(group.flatMap(_.address))
      representative.v1copy(
        address = mergedAddress,
        fifoId  = if (fifoIdsAgree) representative.fifoId else None)
    }.toList
  }
}
/** Buffer depths for each of the five TileLink channels. */
case class TLBufferParams(
  a: BufferParams = BufferParams.none,
  b: BufferParams = BufferParams.none,
  c: BufferParams = BufferParams.none,
  d: BufferParams = BufferParams.none,
  e: BufferParams = BufferParams.none
) extends DirectedBuffers[TLBufferParams] {
  // "In" channels flow toward the master (B, D); "out" flow toward the slave (A, C, E).
  def copyIn(x: BufferParams) = this.copy(b = x, d = x)
  def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x)
  def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x)
}
/** Pretty printing of TL source id maps */
class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] {
  // Width (in decimal digits) of the largest source id, for column alignment.
  private val tlDigits = String.valueOf(tl.endSourceId-1).length()
  protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s"
  private val sorted = tl.masters.sortBy(_.sourceId)

  // One entry per master, in ascending source-id order.
  val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c =>
    TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo)
  }
}
/** One row of a [[TLSourceIdMap]]: the id range owned by one master. */
case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean)
  extends IdMapEntry
{
  val from = tlId
  val to = tlId
  // Each source id can carry at most one outstanding transaction.
  val maxTransactionsInFlight = Some(tlId.size)
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
/** True when `address` is naturally aligned to a transfer of 2^lgSize bytes. */
def isAligned(address: UInt, lgSize: UInt): Bool = {
  if (maxLgSize == 0) true.B else {
    // One bits below lgSize must all be zero in the address.
    val mask = UIntToOH1(lgSize, maxLgSize)
    (address & mask) === 0.U
  }
}

/** Byte-lane mask for one beat of a 2^lgSize-byte transfer at `address`. */
def mask(address: UInt, lgSize: UInt): UInt =
  MaskGen(address, lgSize, manager.beatBytes)
/** If the diplomatic parameters force every message on this channel to
  * either always or never carry data, return that constant; otherwise None
  * (the answer must then be decoded from the opcode at runtime).
  */
def staticHasData(bundle: TLChannel): Option[Boolean] = {
  bundle match {
    case _:TLBundleA => {
      // Do there exist A messages with Data?
      val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
      // Do there exist A messages without Data?
      val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
      // Statically optimize the case where hasData is a constant
      if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
    }
    case _:TLBundleB => {
      // Do there exist B messages with Data?
      val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
      // Do there exist B messages without Data?
      val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
      // Statically optimize the case where hasData is a constant
      if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
    }
    case _:TLBundleC => {
      // Do there exist C messages with Data?
      // (probe appears on both sides: a probe response may be ProbeAck or ProbeAckData)
      val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
      // Do there exist C messages without Data?
      val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
      if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
    }
    case _:TLBundleD => {
      // Do there exist D messages with Data?
      val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
      // Do there exist D messages without Data?
      val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
      if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
    }
    // E channel (GrantAck) never carries data.
    case _:TLBundleE => Some(false)
  }
}
/** True for beats that open a transaction (and thus expect a response). */
def isRequest(x: TLChannel): Bool = {
  x match {
    case a: TLBundleA => true.B
    case b: TLBundleB => true.B
    case c: TLBundleC => c.opcode(2) && c.opcode(1)
    //    opcode === TLMessages.Release ||
    //    opcode === TLMessages.ReleaseData
    case d: TLBundleD => d.opcode(2) && !d.opcode(1)
    //    opcode === TLMessages.Grant     ||
    //    opcode === TLMessages.GrantData
    case e: TLBundleE => false.B
  }
}

/** True for beats that answer a previous request.  Note that Grant
  * messages are both (isResponse to an Acquire, isRequest for the E ack).
  */
def isResponse(x: TLChannel): Bool = {
  x match {
    case a: TLBundleA => false.B
    case b: TLBundleB => false.B
    case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
    //    opcode =/= TLMessages.Release &&
    //    opcode =/= TLMessages.ReleaseData
    case d: TLBundleD => true.B // Grant isResponse + isRequest
    case e: TLBundleE => true.B
  }
}
/** Whether this beat carries payload data.  Decoded from the opcode bits,
  * but folded to a constant when staticHasData can prove the answer.
  */
def hasData(x: TLChannel): Bool = {
  val opdata = x match {
    case a: TLBundleA => !a.opcode(2)
    //    opcode === TLMessages.PutFullData    ||
    //    opcode === TLMessages.PutPartialData ||
    //    opcode === TLMessages.ArithmeticData ||
    //    opcode === TLMessages.LogicalData
    case b: TLBundleB => !b.opcode(2)
    //    opcode === TLMessages.PutFullData    ||
    //    opcode === TLMessages.PutPartialData ||
    //    opcode === TLMessages.ArithmeticData ||
    //    opcode === TLMessages.LogicalData
    case c: TLBundleC => c.opcode(0)
    //    opcode === TLMessages.AccessAckData ||
    //    opcode === TLMessages.ProbeAckData  ||
    //    opcode === TLMessages.ReleaseData
    case d: TLBundleD => d.opcode(0)
    //    opcode === TLMessages.AccessAckData ||
    //    opcode === TLMessages.GrantData
    case e: TLBundleE => false.B
  }
  staticHasData(x).map(_.B).getOrElse(opdata)
}
// Field accessors that dispatch on the concrete data-channel type.

/** The opcode field of a data-carrying channel beat. */
def opcode(x: TLDataChannel): UInt = x match {
  case ch: TLBundleA => ch.opcode
  case ch: TLBundleB => ch.opcode
  case ch: TLBundleC => ch.opcode
  case ch: TLBundleD => ch.opcode
}

/** The param field of a data-carrying channel beat. */
def param(x: TLDataChannel): UInt = x match {
  case ch: TLBundleA => ch.param
  case ch: TLBundleB => ch.param
  case ch: TLBundleC => ch.param
  case ch: TLBundleD => ch.param
}

/** The (log2) size field of a data-carrying channel beat. */
def size(x: TLDataChannel): UInt = x match {
  case ch: TLBundleA => ch.size
  case ch: TLBundleB => ch.size
  case ch: TLBundleC => ch.size
  case ch: TLBundleD => ch.size
}

/** The data payload of a data-carrying channel beat. */
def data(x: TLDataChannel): UInt = x match {
  case ch: TLBundleA => ch.data
  case ch: TLBundleB => ch.data
  case ch: TLBundleC => ch.data
  case ch: TLBundleD => ch.data
}

/** The corrupt flag of a data-carrying channel beat. */
def corrupt(x: TLDataChannel): Bool = x match {
  case ch: TLBundleA => ch.corrupt
  case ch: TLBundleB => ch.corrupt
  case ch: TLBundleC => ch.corrupt
  case ch: TLBundleD => ch.corrupt
}
/** The byte mask of an addressed beat. The C channel carries no mask field,
  * so its mask is derived from the address and size.
  */
def mask(x: TLAddrChannel): UInt = x match {
  case ch: TLBundleA => ch.mask
  case ch: TLBundleB => ch.mask
  case ch: TLBundleC => mask(ch.address, ch.size)
}

/** The mask as recomputed from address and size for every channel,
  * ignoring any (possibly partial) mask field carried in the beat.
  */
def full_mask(x: TLAddrChannel): UInt = x match {
  case ch: TLBundleA => mask(ch.address, ch.size)
  case ch: TLBundleB => mask(ch.address, ch.size)
  case ch: TLBundleC => mask(ch.address, ch.size)
}

/** The address field of an addressed beat. */
def address(x: TLAddrChannel): UInt = x match {
  case ch: TLBundleA => ch.address
  case ch: TLBundleB => ch.address
  case ch: TLBundleC => ch.address
}

/** The source-id field of a data-carrying channel beat. */
def source(x: TLDataChannel): UInt = x match {
  case ch: TLBundleA => ch.source
  case ch: TLBundleB => ch.source
  case ch: TLBundleC => ch.source
  case ch: TLBundleD => ch.source
}
// Beat-aligned address helpers: addr_hi is the beat index of an address,
// addr_lo its byte offset within a beat (constant 0 for 1-byte beats).
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
// Convenience overloads that first extract the address from a channel beat.
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
/** The number of beats in the burst containing this beat (always >= 1,
  * as a one-hot-style UInt). Data-less messages and transfers no larger
  * than one beat take exactly one beat; otherwise the one-hot decode of
  * the size field is shifted down by log2(beatBytes).
  */
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
// 'small': the transfer fits within a single beat
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
/** numBeats minus one, which is cheaper to compute than numBeats:
  * 0 for data-less messages and E beats, else the all-ones mask
  * (2^size - 1) with the within-beat offset bits shifted away.
  */
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
// Only single-beat transfers are possible on this edge.
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
/** Track the beat position within bursts on one channel.
  * Returns (first, last, done, count): 'first'/'last' mark the edges of
  * the current burst, 'done' pulses when the final beat fires, and
  * 'count' is the index of the current beat within the burst.
  * Instantiates a down-counter register, so use once per channel.
  */
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
// With a power-of-two burst length, beats1 & ~counter1 recovers the
// current beat index from the down-counter.
val count = (beats1 & ~counter1)
when (fire) {
// Load the counter on the first beat; decrement on later beats.
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
// Convenience projections of firstlastHelper for raw bits+fire,
// DecoupledIO, and ValidIO channels. Each call elaborates its own beat
// counter (firstlastHelper instantiates a register), so call at most one
// of these per channel.
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
// (first, last, done) from a single shared counter.
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
// (first, last, done, beat index) from a single shared counter.
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
// As count, but the index is scaled to a byte offset within the burst.
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
// Writes (Put*/Arithmetic/Logical), write-intent hints, and acquires that
// grow to Tip need T; reads and read-prefetches do not. Opcode/param
// values not listed decode to DontCare, as they cannot legally appear in
// an A-channel request.
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
/** Count the transactions currently outstanding on this link.
  * A transaction is opened when the first beat of a request fires and
  * closed when the last beat of its response fires. Returns the counter
  * both before (registered) and after (combinational) this cycle's update.
  * B/C/E channels only participate when the edge supports coherence (bce).
  */
def inFlight(x: TLBundle): (UInt, UInt) = {
// Wide enough for up to 3 concurrent transactions per source id.
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
// Increment for each request whose first beat fires this cycle...
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
// ...and decrement for each response whose last beat fires.
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
/** Render the source-id assignment of this edge's clients as a
  * human-readable table, prefixed with the caller-supplied context.
  */
def prettySourceMapping(context: String): String = {
  val table = (new TLSourceIdMap(client)).pretty
  s"TL-Source mapping for $context:\n$table\n"
}
}
/** The client (master) side of a TileLink edge: smart constructors for the
  * messages a client emits (A-channel requests, C-channel releases and
  * probe/access responses, E-channel GrantAck).
  * Constructors whose legality depends on the target address return a
  * (legal, bits) pair; the caller must only send when 'legal' holds.
  * Constructors for responses can never be illegal and return bits alone.
  */
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
// Acquire a cached copy of a block, raising permissions per growPermissions.
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
// As AcquireBlock, but requests only a permission upgrade (no data copy).
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
// Voluntary writeback (C channel): Release without data, ReleaseData with.
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
// Responses to B-channel Probes; always legal, so only bits are returned.
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
// E-channel completion of an Acquire/Grant handshake.
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
// Full-beat write (mask derived from address and size).
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
// Masked (partial) write; the caller supplies the byte mask.
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
// Atomic read-modify-write; 'atomic' selects the arithmetic operation.
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
// Atomic read-modify-write; 'atomic' selects the logical operation.
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
// C-channel responses to B-channel accesses (used when this client is the
// target of a forwarded Get/Put/Hint).
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
/** The manager (slave) side of a TileLink edge: smart constructors for the
  * messages a manager emits (B-channel forwarded requests and probes,
  * D-channel grants and acknowledgements).
  * Constructors whose legality depends on the target source id return a
  * (legal, bits) pair; response constructors return bits alone.
  */
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transpose for possibly-ragged sequences: empty rows are filtered out on
// each step, so rows that run out simply drop from later columns (unlike
// Seq.transpose, which requires uniform row lengths).
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
// Ask a client to reduce its permissions on a block to capPermissions.
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
// Answer an Acquire: Grant without data, GrantData with payload.
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
// Acknowledge a voluntary C-channel Release/ReleaseData.
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
// B-channel forwarded accesses targeting a caching client.
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
// D-channel responses to A-channel accesses; never illegal.
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module BoomWritebackUnit( // @[dcache.scala:24:7]
input clock, // @[dcache.scala:24:7]
input reset, // @[dcache.scala:24:7]
output io_req_ready, // @[dcache.scala:25:14]
input io_req_valid, // @[dcache.scala:25:14]
input [19:0] io_req_bits_tag, // @[dcache.scala:25:14]
input [5:0] io_req_bits_idx, // @[dcache.scala:25:14]
input [1:0] io_req_bits_source, // @[dcache.scala:25:14]
input [2:0] io_req_bits_param, // @[dcache.scala:25:14]
input [3:0] io_req_bits_way_en, // @[dcache.scala:25:14]
input io_req_bits_voluntary, // @[dcache.scala:25:14]
input io_meta_read_ready, // @[dcache.scala:25:14]
output io_meta_read_valid, // @[dcache.scala:25:14]
output [5:0] io_meta_read_bits_idx, // @[dcache.scala:25:14]
output [19:0] io_meta_read_bits_tag, // @[dcache.scala:25:14]
output io_resp, // @[dcache.scala:25:14]
output io_idx_valid, // @[dcache.scala:25:14]
output [5:0] io_idx_bits, // @[dcache.scala:25:14]
input io_data_req_ready, // @[dcache.scala:25:14]
output io_data_req_valid, // @[dcache.scala:25:14]
output [3:0] io_data_req_bits_way_en, // @[dcache.scala:25:14]
output [11:0] io_data_req_bits_addr, // @[dcache.scala:25:14]
input [63:0] io_data_resp, // @[dcache.scala:25:14]
input io_mem_grant, // @[dcache.scala:25:14]
input io_release_ready, // @[dcache.scala:25:14]
output io_release_valid, // @[dcache.scala:25:14]
output [2:0] io_release_bits_opcode, // @[dcache.scala:25:14]
output [2:0] io_release_bits_param, // @[dcache.scala:25:14]
output [1:0] io_release_bits_source, // @[dcache.scala:25:14]
output [31:0] io_release_bits_address, // @[dcache.scala:25:14]
output [63:0] io_release_bits_data, // @[dcache.scala:25:14]
input io_lsu_release_ready, // @[dcache.scala:25:14]
output io_lsu_release_valid, // @[dcache.scala:25:14]
output [2:0] io_lsu_release_bits_param, // @[dcache.scala:25:14]
output [1:0] io_lsu_release_bits_source, // @[dcache.scala:25:14]
output [31:0] io_lsu_release_bits_address, // @[dcache.scala:25:14]
output [63:0] io_lsu_release_bits_data // @[dcache.scala:25:14]
);
reg [2:0] state; // @[dcache.scala:39:22]
wire io_req_valid_0 = io_req_valid; // @[dcache.scala:24:7]
wire [19:0] io_req_bits_tag_0 = io_req_bits_tag; // @[dcache.scala:24:7]
wire [5:0] io_req_bits_idx_0 = io_req_bits_idx; // @[dcache.scala:24:7]
wire [1:0] io_req_bits_source_0 = io_req_bits_source; // @[dcache.scala:24:7]
wire [2:0] io_req_bits_param_0 = io_req_bits_param; // @[dcache.scala:24:7]
wire [3:0] io_req_bits_way_en_0 = io_req_bits_way_en; // @[dcache.scala:24:7]
wire io_req_bits_voluntary_0 = io_req_bits_voluntary; // @[dcache.scala:24:7]
wire io_meta_read_ready_0 = io_meta_read_ready; // @[dcache.scala:24:7]
wire io_data_req_ready_0 = io_data_req_ready; // @[dcache.scala:24:7]
wire [63:0] io_data_resp_0 = io_data_resp; // @[dcache.scala:24:7]
wire io_mem_grant_0 = io_mem_grant; // @[dcache.scala:24:7]
wire io_release_ready_0 = io_release_ready; // @[dcache.scala:24:7]
wire io_lsu_release_ready_0 = io_lsu_release_ready; // @[dcache.scala:24:7]
wire [26:0] _r_beats1_decode_T = 27'h3FFC0; // @[package.scala:243:71]
wire [11:0] _r_beats1_decode_T_1 = 12'hFC0; // @[package.scala:243:76]
wire [11:0] _r_beats1_decode_T_2 = 12'h3F; // @[package.scala:243:46]
wire [8:0] r_beats1_decode = 9'h7; // @[Edges.scala:220:59]
wire _voluntaryRelease_legal_T_19 = 1'h1; // @[Parameters.scala:91:44]
wire _voluntaryRelease_legal_T_20 = 1'h1; // @[Parameters.scala:684:29]
wire [2:0] voluntaryRelease_opcode = 3'h7; // @[Edges.scala:396:17]
wire [1:0] voluntaryRelease_source = 2'h2; // @[Edges.scala:396:17]
wire [2:0] io_lsu_release_bits_opcode = 3'h5; // @[dcache.scala:24:7]
wire [2:0] probeResponse_opcode = 3'h5; // @[Edges.scala:433:17]
wire io_release_bits_corrupt = 1'h0; // @[dcache.scala:24:7]
wire io_lsu_release_bits_corrupt = 1'h0; // @[dcache.scala:24:7]
wire probeResponse_corrupt = 1'h0; // @[Edges.scala:433:17]
wire _voluntaryRelease_legal_T = 1'h0; // @[Parameters.scala:684:29]
wire _voluntaryRelease_legal_T_18 = 1'h0; // @[Parameters.scala:684:54]
wire _voluntaryRelease_legal_T_33 = 1'h0; // @[Parameters.scala:686:26]
wire voluntaryRelease_corrupt = 1'h0; // @[Edges.scala:396:17]
wire _io_release_bits_T_corrupt = 1'h0; // @[dcache.scala:124:27]
wire [3:0] io_release_bits_size = 4'h6; // @[dcache.scala:24:7]
wire [3:0] io_lsu_release_bits_size = 4'h6; // @[dcache.scala:24:7]
wire [3:0] probeResponse_size = 4'h6; // @[Edges.scala:433:17]
wire [3:0] voluntaryRelease_size = 4'h6; // @[Edges.scala:396:17]
wire [3:0] _io_release_bits_T_size = 4'h6; // @[dcache.scala:124:27]
wire [3:0] io_meta_read_bits_way_en = 4'h0; // @[dcache.scala:24:7]
wire io_req_ready_0 = ~(|state); // @[dcache.scala:24:7, :39:22, :49:31, :80:15]
wire _io_idx_valid_T; // @[dcache.scala:49:31]
wire [11:0] _io_data_req_bits_addr_T_2; // @[dcache.scala:97:43]
wire [2:0] _io_release_bits_T_opcode; // @[dcache.scala:124:27]
wire [2:0] _io_release_bits_T_param; // @[dcache.scala:124:27]
wire [1:0] _io_release_bits_T_source; // @[dcache.scala:124:27]
wire [31:0] _io_release_bits_T_address; // @[dcache.scala:124:27]
wire [63:0] _io_release_bits_T_data; // @[dcache.scala:124:27]
wire [2:0] probeResponse_param; // @[Edges.scala:433:17]
wire [1:0] probeResponse_source; // @[Edges.scala:433:17]
wire [31:0] probeResponse_address; // @[Edges.scala:433:17]
wire [63:0] probeResponse_data; // @[Edges.scala:433:17]
wire [5:0] io_meta_read_bits_idx_0; // @[dcache.scala:24:7]
wire [19:0] io_meta_read_bits_tag_0; // @[dcache.scala:24:7]
wire io_meta_read_valid_0; // @[dcache.scala:24:7]
wire io_idx_valid_0; // @[dcache.scala:24:7]
wire [5:0] io_idx_bits_0; // @[dcache.scala:24:7]
wire [3:0] io_data_req_bits_way_en_0; // @[dcache.scala:24:7]
wire [11:0] io_data_req_bits_addr_0; // @[dcache.scala:24:7]
wire io_data_req_valid_0; // @[dcache.scala:24:7]
wire [2:0] io_release_bits_opcode_0; // @[dcache.scala:24:7]
wire [2:0] io_release_bits_param_0; // @[dcache.scala:24:7]
wire [1:0] io_release_bits_source_0; // @[dcache.scala:24:7]
wire [31:0] io_release_bits_address_0; // @[dcache.scala:24:7]
wire [63:0] io_release_bits_data_0; // @[dcache.scala:24:7]
wire io_release_valid_0; // @[dcache.scala:24:7]
wire [2:0] io_lsu_release_bits_param_0; // @[dcache.scala:24:7]
wire [1:0] io_lsu_release_bits_source_0; // @[dcache.scala:24:7]
wire [31:0] io_lsu_release_bits_address_0; // @[dcache.scala:24:7]
wire [63:0] io_lsu_release_bits_data_0; // @[dcache.scala:24:7]
wire io_lsu_release_valid_0; // @[dcache.scala:24:7]
wire io_resp_0; // @[dcache.scala:24:7]
reg [19:0] req_tag; // @[dcache.scala:37:16]
assign io_meta_read_bits_tag_0 = req_tag; // @[dcache.scala:24:7, :37:16]
reg [5:0] req_idx; // @[dcache.scala:37:16]
assign io_meta_read_bits_idx_0 = req_idx; // @[dcache.scala:24:7, :37:16]
assign io_idx_bits_0 = req_idx; // @[dcache.scala:24:7, :37:16]
reg [1:0] req_source; // @[dcache.scala:37:16]
assign probeResponse_source = req_source; // @[Edges.scala:433:17]
reg [2:0] req_param; // @[dcache.scala:37:16]
assign probeResponse_param = req_param; // @[Edges.scala:433:17]
wire [2:0] voluntaryRelease_param = req_param; // @[Edges.scala:396:17]
reg [3:0] req_way_en; // @[dcache.scala:37:16]
assign io_data_req_bits_way_en_0 = req_way_en; // @[dcache.scala:24:7, :37:16]
reg req_voluntary; // @[dcache.scala:37:16]
reg r1_data_req_fired; // @[dcache.scala:40:34]
reg r2_data_req_fired; // @[dcache.scala:41:34]
reg [3:0] r1_data_req_cnt; // @[dcache.scala:42:28]
reg [3:0] r2_data_req_cnt; // @[dcache.scala:43:28]
reg [3:0] data_req_cnt; // @[dcache.scala:44:29]
wire _T_14 = io_release_ready_0 & io_release_valid_0; // @[Decoupled.scala:51:35]
wire r_beats1_opdata = io_release_bits_opcode_0[0]; // @[Edges.scala:102:36]
wire [8:0] r_beats1 = r_beats1_opdata ? 9'h7 : 9'h0; // @[Edges.scala:102:36, :220:59, :221:14]
reg [8:0] r_counter; // @[Edges.scala:229:27]
wire [9:0] _r_counter1_T = {1'h0, r_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] r_counter1 = _r_counter1_T[8:0]; // @[Edges.scala:230:28]
wire r_1 = r_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _r_last_T = r_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _r_last_T_1 = r_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire last_beat = _r_last_T | _r_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire all_beats_done = last_beat & _T_14; // @[Decoupled.scala:51:35]
wire [8:0] _r_count_T = ~r_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] beat_count = r_beats1 & _r_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _r_counter_T = r_1 ? r_beats1 : r_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [63:0] wb_buffer_0; // @[dcache.scala:46:22]
reg [63:0] wb_buffer_1; // @[dcache.scala:46:22]
reg [63:0] wb_buffer_2; // @[dcache.scala:46:22]
reg [63:0] wb_buffer_3; // @[dcache.scala:46:22]
reg [63:0] wb_buffer_4; // @[dcache.scala:46:22]
reg [63:0] wb_buffer_5; // @[dcache.scala:46:22]
reg [63:0] wb_buffer_6; // @[dcache.scala:46:22]
reg [63:0] wb_buffer_7; // @[dcache.scala:46:22]
reg acked; // @[dcache.scala:47:22]
assign _io_idx_valid_T = |state; // @[dcache.scala:39:22, :49:31]
assign io_idx_valid_0 = _io_idx_valid_T; // @[dcache.scala:24:7, :49:31]
wire [25:0] _r_address_T = {req_tag, req_idx}; // @[dcache.scala:37:16, :63:22]
wire [31:0] r_address = {_r_address_T, 6'h0}; // @[dcache.scala:63:{22,41}]
assign probeResponse_address = r_address; // @[Edges.scala:433:17]
wire [31:0] _voluntaryRelease_legal_T_1 = r_address; // @[Parameters.scala:137:31]
wire [31:0] voluntaryRelease_address = r_address; // @[Edges.scala:396:17]
wire [2:0] _probeResponse_T = data_req_cnt[2:0]; // @[dcache.scala:44:29]
wire [2:0] _voluntaryRelease_T = data_req_cnt[2:0]; // @[dcache.scala:44:29]
wire [2:0] _io_data_req_bits_addr_T = data_req_cnt[2:0]; // @[dcache.scala:44:29, :96:56]
assign io_lsu_release_bits_param_0 = probeResponse_param; // @[Edges.scala:433:17]
assign io_lsu_release_bits_source_0 = probeResponse_source; // @[Edges.scala:433:17]
assign io_lsu_release_bits_address_0 = probeResponse_address; // @[Edges.scala:433:17]
assign io_lsu_release_bits_data_0 = probeResponse_data; // @[Edges.scala:433:17]
wire [7:0][63:0] _GEN = {{wb_buffer_7}, {wb_buffer_6}, {wb_buffer_5}, {wb_buffer_4}, {wb_buffer_3}, {wb_buffer_2}, {wb_buffer_1}, {wb_buffer_0}}; // @[Edges.scala:441:15]
assign probeResponse_data = _GEN[_probeResponse_T]; // @[Edges.scala:433:17, :441:15]
wire [32:0] _voluntaryRelease_legal_T_2 = {1'h0, _voluntaryRelease_legal_T_1}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _voluntaryRelease_legal_T_3 = _voluntaryRelease_legal_T_2 & 33'h8C000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _voluntaryRelease_legal_T_4 = _voluntaryRelease_legal_T_3; // @[Parameters.scala:137:46]
wire _voluntaryRelease_legal_T_5 = _voluntaryRelease_legal_T_4 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire [31:0] _voluntaryRelease_legal_T_6 = {r_address[31:17], r_address[16:0] ^ 17'h10000}; // @[Parameters.scala:137:31]
wire [32:0] _voluntaryRelease_legal_T_7 = {1'h0, _voluntaryRelease_legal_T_6}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _voluntaryRelease_legal_T_8 = _voluntaryRelease_legal_T_7 & 33'h8C011000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _voluntaryRelease_legal_T_9 = _voluntaryRelease_legal_T_8; // @[Parameters.scala:137:46]
wire _voluntaryRelease_legal_T_10 = _voluntaryRelease_legal_T_9 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire [31:0] _voluntaryRelease_legal_T_11 = {r_address[31:28], r_address[27:0] ^ 28'hC000000}; // @[Parameters.scala:137:31]
wire [32:0] _voluntaryRelease_legal_T_12 = {1'h0, _voluntaryRelease_legal_T_11}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _voluntaryRelease_legal_T_13 = _voluntaryRelease_legal_T_12 & 33'h8C000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _voluntaryRelease_legal_T_14 = _voluntaryRelease_legal_T_13; // @[Parameters.scala:137:46]
wire _voluntaryRelease_legal_T_15 = _voluntaryRelease_legal_T_14 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _voluntaryRelease_legal_T_16 = _voluntaryRelease_legal_T_5 | _voluntaryRelease_legal_T_10; // @[Parameters.scala:685:42]
wire _voluntaryRelease_legal_T_17 = _voluntaryRelease_legal_T_16 | _voluntaryRelease_legal_T_15; // @[Parameters.scala:685:42]
wire [31:0] _voluntaryRelease_legal_T_21 = {r_address[31:28], r_address[27:0] ^ 28'h8000000}; // @[Parameters.scala:137:31]
wire [32:0] _voluntaryRelease_legal_T_22 = {1'h0, _voluntaryRelease_legal_T_21}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _voluntaryRelease_legal_T_23 = _voluntaryRelease_legal_T_22 & 33'h8C010000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _voluntaryRelease_legal_T_24 = _voluntaryRelease_legal_T_23; // @[Parameters.scala:137:46]
wire _voluntaryRelease_legal_T_25 = _voluntaryRelease_legal_T_24 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire [31:0] _voluntaryRelease_legal_T_26 = r_address ^ 32'h80000000; // @[Parameters.scala:137:31]
wire [32:0] _voluntaryRelease_legal_T_27 = {1'h0, _voluntaryRelease_legal_T_26}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _voluntaryRelease_legal_T_28 = _voluntaryRelease_legal_T_27 & 33'h80000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _voluntaryRelease_legal_T_29 = _voluntaryRelease_legal_T_28; // @[Parameters.scala:137:46]
wire _voluntaryRelease_legal_T_30 = _voluntaryRelease_legal_T_29 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _voluntaryRelease_legal_T_31 = _voluntaryRelease_legal_T_25 | _voluntaryRelease_legal_T_30; // @[Parameters.scala:685:42]
wire _voluntaryRelease_legal_T_32 = _voluntaryRelease_legal_T_31; // @[Parameters.scala:684:54, :685:42]
wire voluntaryRelease_legal = _voluntaryRelease_legal_T_32; // @[Parameters.scala:684:54, :686:26]
wire [63:0] voluntaryRelease_data; // @[Edges.scala:396:17]
assign voluntaryRelease_data = _GEN[_voluntaryRelease_T]; // @[Edges.scala:396:17, :404:15, :441:15]
wire _T_3 = state == 3'h1; // @[dcache.scala:39:22, :88:22]
wire _io_meta_read_valid_T = ~(data_req_cnt[3]); // @[dcache.scala:44:29, :89:40]
assign io_meta_read_valid_0 = (|state) & _T_3 & _io_meta_read_valid_T; // @[dcache.scala:24:7, :39:22, :49:31, :54:22, :80:30, :88:{22,41}, :89:{24,40}]
wire _io_data_req_valid_T = ~(data_req_cnt[3]); // @[dcache.scala:44:29, :89:40, :93:39]
assign io_data_req_valid_0 = (|state) & _T_3 & _io_data_req_valid_T; // @[dcache.scala:24:7, :39:22, :49:31, :56:22, :80:30, :88:{22,41}, :93:{23,39}]
wire [8:0] _io_data_req_bits_addr_T_1 = {req_idx, _io_data_req_bits_addr_T}; // @[dcache.scala:37:16, :96:{34,56}]
assign _io_data_req_bits_addr_T_2 = {_io_data_req_bits_addr_T_1, 3'h0}; // @[dcache.scala:96:34, :97:43]
assign io_data_req_bits_addr_0 = _io_data_req_bits_addr_T_2; // @[dcache.scala:24:7, :97:43]
wire [4:0] _GEN_0 = {1'h0, data_req_cnt} + 5'h1; // @[dcache.scala:44:29, :106:36]
wire [4:0] _data_req_cnt_T; // @[dcache.scala:106:36]
assign _data_req_cnt_T = _GEN_0; // @[dcache.scala:106:36]
wire [4:0] _data_req_cnt_T_2; // @[dcache.scala:130:36]
assign _data_req_cnt_T_2 = _GEN_0; // @[dcache.scala:106:36, :130:36]
wire [3:0] _data_req_cnt_T_1 = _data_req_cnt_T[3:0]; // @[dcache.scala:106:36]
wire _T_8 = r2_data_req_cnt == 4'h7; // @[dcache.scala:43:28, :110:29]
assign io_resp_0 = (|state) & _T_3 & r2_data_req_fired & _T_8; // @[dcache.scala:24:7, :39:22, :41:34, :49:31, :58:22, :80:30, :88:{22,41}, :108:30, :110:{29,53}]
wire _T_9 = state == 3'h2; // @[dcache.scala:39:22, :116:22]
assign io_lsu_release_valid_0 = ~(~(|state) | _T_3) & _T_9; // @[dcache.scala:24:7, :39:22, :49:31, :59:24, :80:{15,30}, :88:{22,41}, :116:{22,41}]
wire _T_11 = state == 3'h3; // @[dcache.scala:39:22, :122:22]
wire _io_release_valid_T = ~(data_req_cnt[3]); // @[dcache.scala:44:29, :89:40, :123:38]
wire _GEN_1 = _T_3 | _T_9; // @[dcache.scala:51:22, :88:{22,41}, :116:{22,41}, :122:36]
assign io_release_valid_0 = ~(~(|state) | _GEN_1) & _T_11 & _io_release_valid_T; // @[dcache.scala:24:7, :39:22, :49:31, :51:22, :80:{15,30}, :88:41, :116:41, :122:{22,36}, :123:{22,38}]
assign _io_release_bits_T_opcode = {1'h1, req_voluntary, 1'h1}; // @[dcache.scala:37:16, :124:27]
assign _io_release_bits_T_param = req_voluntary ? voluntaryRelease_param : probeResponse_param; // @[Edges.scala:396:17, :433:17]
assign _io_release_bits_T_source = req_voluntary ? 2'h2 : probeResponse_source; // @[Edges.scala:433:17]
assign _io_release_bits_T_address = req_voluntary ? voluntaryRelease_address : probeResponse_address; // @[Edges.scala:396:17, :433:17]
assign _io_release_bits_T_data = req_voluntary ? voluntaryRelease_data : probeResponse_data; // @[Edges.scala:396:17, :433:17]
assign io_release_bits_opcode_0 = _io_release_bits_T_opcode; // @[dcache.scala:24:7, :124:27]
assign io_release_bits_param_0 = _io_release_bits_T_param; // @[dcache.scala:24:7, :124:27]
assign io_release_bits_source_0 = _io_release_bits_T_source; // @[dcache.scala:24:7, :124:27]
assign io_release_bits_address_0 = _io_release_bits_T_address; // @[dcache.scala:24:7, :124:27]
assign io_release_bits_data_0 = _io_release_bits_T_data; // @[dcache.scala:24:7, :124:27]
wire [3:0] _data_req_cnt_T_3 = _data_req_cnt_T_2[3:0]; // @[dcache.scala:130:36]
wire [2:0] _state_T = {req_voluntary, 2'h0}; // @[dcache.scala:37:16, :133:19]
wire _T_16 = state == 3'h4; // @[dcache.scala:39:22, :135:22]
wire _T_2 = io_req_ready_0 & io_req_valid_0; // @[Decoupled.scala:51:35]
wire _GEN_2 = (|state) & _T_3; // @[dcache.scala:39:22, :41:34, :49:31, :80:30, :88:{22,41}]
wire _T_6 = io_data_req_ready_0 & io_data_req_valid_0 & io_meta_read_ready_0 & io_meta_read_valid_0; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[dcache.scala:24:7]
if (~(|state) & _T_2) begin // @[Decoupled.scala:51:35]
req_tag <= io_req_bits_tag_0; // @[dcache.scala:24:7, :37:16]
req_idx <= io_req_bits_idx_0; // @[dcache.scala:24:7, :37:16]
req_source <= io_req_bits_source_0; // @[dcache.scala:24:7, :37:16]
req_param <= io_req_bits_param_0; // @[dcache.scala:24:7, :37:16]
req_way_en <= io_req_bits_way_en_0; // @[dcache.scala:24:7, :37:16]
req_voluntary <= io_req_bits_voluntary_0; // @[dcache.scala:24:7, :37:16]
end
if (_GEN_2) begin // @[dcache.scala:41:34, :43:28, :80:30, :88:41]
r1_data_req_cnt <= _T_6 ? data_req_cnt : 4'h0; // @[Decoupled.scala:51:35]
r2_data_req_cnt <= r1_data_req_cnt; // @[dcache.scala:42:28, :43:28]
end
if ((|state) & _T_3 & r2_data_req_fired & r2_data_req_cnt[2:0] == 3'h0) // @[dcache.scala:39:22, :41:34, :43:28, :46:22, :49:31, :80:30, :88:{22,41}, :108:30, :109:34]
wb_buffer_0 <= io_data_resp_0; // @[dcache.scala:24:7, :46:22]
if ((|state) & _T_3 & r2_data_req_fired & r2_data_req_cnt[2:0] == 3'h1) // @[dcache.scala:39:22, :41:34, :43:28, :46:22, :49:31, :80:30, :88:{22,41}, :108:30, :109:34]
wb_buffer_1 <= io_data_resp_0; // @[dcache.scala:24:7, :46:22]
if ((|state) & _T_3 & r2_data_req_fired & r2_data_req_cnt[2:0] == 3'h2) // @[dcache.scala:39:22, :41:34, :43:28, :46:22, :49:31, :80:30, :88:{22,41}, :108:30, :109:34]
wb_buffer_2 <= io_data_resp_0; // @[dcache.scala:24:7, :46:22]
if ((|state) & _T_3 & r2_data_req_fired & r2_data_req_cnt[2:0] == 3'h3) // @[dcache.scala:39:22, :41:34, :43:28, :46:22, :49:31, :80:30, :88:{22,41}, :108:30, :109:34]
wb_buffer_3 <= io_data_resp_0; // @[dcache.scala:24:7, :46:22]
if ((|state) & _T_3 & r2_data_req_fired & r2_data_req_cnt[2:0] == 3'h4) // @[dcache.scala:39:22, :41:34, :43:28, :46:22, :49:31, :80:30, :88:{22,41}, :108:30, :109:34]
wb_buffer_4 <= io_data_resp_0; // @[dcache.scala:24:7, :46:22]
if ((|state) & _T_3 & r2_data_req_fired & r2_data_req_cnt[2:0] == 3'h5) // @[dcache.scala:39:22, :41:34, :43:28, :46:22, :49:31, :80:30, :88:{22,41}, :108:30, :109:34]
wb_buffer_5 <= io_data_resp_0; // @[dcache.scala:24:7, :46:22]
if ((|state) & _T_3 & r2_data_req_fired & r2_data_req_cnt[2:0] == 3'h6) // @[dcache.scala:39:22, :41:34, :43:28, :46:22, :49:31, :80:30, :88:{22,41}, :108:30, :109:34]
wb_buffer_6 <= io_data_resp_0; // @[dcache.scala:24:7, :46:22]
if ((|state) & _T_3 & r2_data_req_fired & (&(r2_data_req_cnt[2:0]))) // @[dcache.scala:39:22, :41:34, :43:28, :46:22, :49:31, :80:30, :88:{22,41}, :108:30, :109:34]
wb_buffer_7 <= io_data_resp_0; // @[dcache.scala:24:7, :46:22]
if (reset) begin // @[dcache.scala:24:7]
state <= 3'h0; // @[dcache.scala:39:22]
r1_data_req_fired <= 1'h0; // @[dcache.scala:40:34]
r2_data_req_fired <= 1'h0; // @[dcache.scala:41:34]
data_req_cnt <= 4'h0; // @[dcache.scala:44:29]
r_counter <= 9'h0; // @[Edges.scala:229:27]
acked <= 1'h0; // @[dcache.scala:47:22]
end
else begin // @[dcache.scala:24:7]
if (|state) begin // @[dcache.scala:39:22, :49:31]
if (_T_3) begin // @[dcache.scala:88:22]
if (r2_data_req_fired & _T_8) begin // @[dcache.scala:39:22, :41:34, :108:30, :110:{29,53}, :112:15]
state <= 3'h2; // @[dcache.scala:39:22]
data_req_cnt <= 4'h0; // @[dcache.scala:44:29]
end
else if (_T_6) // @[Decoupled.scala:51:35]
data_req_cnt <= _data_req_cnt_T_1; // @[dcache.scala:44:29, :106:36]
end
else begin // @[dcache.scala:88:22]
if (_T_9) begin // @[dcache.scala:116:22]
if (io_lsu_release_ready_0 & io_lsu_release_valid_0) // @[Decoupled.scala:51:35]
state <= 3'h3; // @[dcache.scala:39:22]
end
else if (_T_11) begin // @[dcache.scala:122:22]
if (data_req_cnt == 4'h7 & _T_14) // @[Decoupled.scala:51:35]
state <= _state_T; // @[dcache.scala:39:22, :133:19]
end
else if (_T_16 & acked) // @[dcache.scala:39:22, :47:22, :135:{22,35}, :139:18, :140:13]
state <= 3'h0; // @[dcache.scala:39:22]
if (_T_9 | ~(_T_11 & _T_14)) begin // @[Decoupled.scala:51:35]
end
else // @[dcache.scala:44:29, :116:41, :122:36]
data_req_cnt <= _data_req_cnt_T_3; // @[dcache.scala:44:29, :130:36]
end
if (~_GEN_1) // @[dcache.scala:51:22, :88:41, :116:41, :122:36]
acked <= _T_11 ? io_mem_grant_0 | acked : _T_16 & io_mem_grant_0 | acked; // @[dcache.scala:24:7, :47:22, :122:{22,36}, :126:25, :127:13, :135:{22,35}, :136:25, :137:13]
end
else begin // @[dcache.scala:49:31]
if (_T_2) begin // @[Decoupled.scala:51:35]
state <= 3'h1; // @[dcache.scala:39:22]
data_req_cnt <= 4'h0; // @[dcache.scala:44:29]
end
acked <= ~_T_2 & acked; // @[Decoupled.scala:51:35]
end
if (_GEN_2) begin // @[dcache.scala:41:34, :80:30, :88:41]
r1_data_req_fired <= _T_6; // @[Decoupled.scala:51:35]
r2_data_req_fired <= r1_data_req_fired; // @[dcache.scala:40:34, :41:34]
end
if (_T_14) // @[Decoupled.scala:51:35]
r_counter <= _r_counter_T; // @[Edges.scala:229:27, :236:21]
end
always @(posedge)
assign io_req_ready = io_req_ready_0; // @[dcache.scala:24:7]
assign io_meta_read_valid = io_meta_read_valid_0; // @[dcache.scala:24:7]
assign io_meta_read_bits_idx = io_meta_read_bits_idx_0; // @[dcache.scala:24:7]
assign io_meta_read_bits_tag = io_meta_read_bits_tag_0; // @[dcache.scala:24:7]
assign io_resp = io_resp_0; // @[dcache.scala:24:7]
assign io_idx_valid = io_idx_valid_0; // @[dcache.scala:24:7]
assign io_idx_bits = io_idx_bits_0; // @[dcache.scala:24:7]
assign io_data_req_valid = io_data_req_valid_0; // @[dcache.scala:24:7]
assign io_data_req_bits_way_en = io_data_req_bits_way_en_0; // @[dcache.scala:24:7]
assign io_data_req_bits_addr = io_data_req_bits_addr_0; // @[dcache.scala:24:7]
assign io_release_valid = io_release_valid_0; // @[dcache.scala:24:7]
assign io_release_bits_opcode = io_release_bits_opcode_0; // @[dcache.scala:24:7]
assign io_release_bits_param = io_release_bits_param_0; // @[dcache.scala:24:7]
assign io_release_bits_source = io_release_bits_source_0; // @[dcache.scala:24:7]
assign io_release_bits_address = io_release_bits_address_0; // @[dcache.scala:24:7]
assign io_release_bits_data = io_release_bits_data_0; // @[dcache.scala:24:7]
assign io_lsu_release_valid = io_lsu_release_valid_0; // @[dcache.scala:24:7]
assign io_lsu_release_bits_param = io_lsu_release_bits_param_0; // @[dcache.scala:24:7]
assign io_lsu_release_bits_source = io_lsu_release_bits_source_0; // @[dcache.scala:24:7]
assign io_lsu_release_bits_address = io_lsu_release_bits_address_0; // @[dcache.scala:24:7]
assign io_lsu_release_bits_data = io_lsu_release_bits_data_0; // @[dcache.scala:24:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
/** Builds a chain of `n` registers (reset value `init`) carrying `in`.
  *
  * Stage `n-1` samples `in` and stage `0` drives the result; each register
  * may be given a suggested name of the form `<name>_<stage>`.
  */
object ShiftRegInit {
  def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
    ((n - 1) to 0 by -1).foldLeft(in) { (prev, stage) =>
      // Same construction order as the original foldRight: highest stage first.
      val reg = RegNext(prev, init)
      name.foreach { base => reg.suggestName(s"${base}_${stage}") }
      reg
    }
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg,
* but only used for timing applications
*/
/** Base class for pipeline-register modules used in CDC flows: a `w`-bit
  * data input `d` and output `q`. Concrete subclasses supply the register
  * chain between them.
  *
  * @param w width in bits of the data path (default 1)
  */
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(w.W))
    val q = Output(UInt(w.W))
  }
  )
}
/** Factory helper: instantiates a pipeline-register module, drives its input
  * with the raw bits of `in`, and returns the output reinterpreted as `in`'s
  * Chisel type. An optional `name` is suggested for the instance.
  */
object AbstractPipelineReg {
  def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
    val stage = Module(gen)
    name.foreach(stage.suggestName)
    stage.io.d := in.asUInt
    stage.io.q.asTypeOf(in)
  }
}
/** A `w`-wide, `depth`-deep shift register built from asynchronously reset
  * register vectors, each stage reset to `init`. Stage `depth-1` samples
  * `io.d` and stage 0 drives `io.q`; every stage is permanently enabled.
  */
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
  require(depth > 0, "Depth must be greater than 0.")
  override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"

  // One async-reset register vector per stage, named `<name>_<i>`.
  val chain = List.tabulate(depth) { i =>
    Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
  }

  // All stages shift every cycle.
  chain.foreach(_.io.en := true.B)

  // Data enters at the tail of the list and ripples toward the head.
  chain.last.io.d := io.d
  chain.sliding(2).foreach {
    case List(dst, src) => dst.io.d := src.io.q
    case _              => // depth == 1: no inter-stage connections
  }
  io.q := chain.head.io.q
}
/** Convenience constructors for [[AsyncResetShiftReg]]; every overload
  * funnels into the (in, depth, init: Int, name) form.
  */
object AsyncResetShiftReg {
  /** Shift `in` through `depth` async-reset stages, reset value `init`. */
  def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
    AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)

  /** As above with reset value 0. */
  def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
    apply(in, depth, 0, name)

  /** Reset value taken from a literal of the data type. */
  def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
    apply(in, depth, init.litValue.toInt, name)

  /** Literal reset value, no suggested name. */
  def apply [T <: Data](in: T, depth: Int, init: T): T =
    apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
/** Configuration for an asynchronous (clock-crossing) queue.
  *
  * @param depth  number of entries; must be a power of two
  * @param sync   synchronizer flop depth on each crossing signal (>= 2)
  * @param safe   resynchronize indices on reset of either side
  * @param narrow move the read mux to the source side of the crossing
  */
case class AsyncQueueParams(
  depth: Int = 8,
  sync: Int = 3,
  safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
  narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
  require (depth > 0 && isPow2(depth))
  require (sync >= 2)

  val bits = log2Ceil(depth)           // index width, excluding the wrap bit
  val wires = if (narrow) 1 else depth // number of data words crossing the domains
}
object AsyncQueueParams {
  // When there is only one entry, we don't need narrow (the read mux is trivial),
  // so `narrow` is forced to false.
  def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
/** Reset-coordination handshake wires for a `safe` AsyncQueue.
  * Directions are from the source side's point of view (the sink
  * instantiates the enclosing bundle `Flipped`).
  */
class AsyncBundleSafety extends Bundle {
  val ridx_valid = Input (Bool())     // sink reports its read index is valid
  val widx_valid = Output(Bool())     // source reports its write index is valid
  val source_reset_n = Output(Bool()) // source domain is out of reset
  val sink_reset_n = Input (Bool())   // sink domain is out of reset
}
/** The raw wires that cross between the source and sink clock domains of an
  * AsyncQueue: the storage contents plus read/write indices (Gray-coded by
  * the source/sink logic that drives them).
  */
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
  // Data-path synchronization
  val mem = Output(Vec(params.wires, gen)) // all entries, or a single muxed word when `narrow`
  val ridx = Input (UInt((params.bits+1).W)) // read index from the sink (extra bit = wrap)
  val widx = Output(UInt((params.bits+1).W)) // write index to the sink (extra bit = wrap)
  val index = params.narrow.option(Input(UInt(params.bits.W))) // read-mux select, only when `narrow`

  // Signals used to self-stabilize a safe AsyncQueue
  val safe = params.safe.option(new AsyncBundleSafety)
}
/** A `bits`-wide Gray-coded counter.
  *
  * Keeps a binary register (suggested name `name`) that adds `increment`
  * each cycle, or clears to zero when `clear` is asserted, and returns the
  * binary-to-Gray conversion of the *next* count, so consecutive outputs
  * differ in exactly one bit -- the property the CDC synchronizers rely on.
  */
object GrayCounter {
  def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
    val nextBinary = Wire(UInt(bits.W))
    val binaryReg = RegNext(next = nextBinary, init = 0.U).suggestName(name)
    nextBinary := Mux(clear, 0.U, binaryReg + increment.asUInt)
    // Binary-to-Gray: g = b ^ (b >> 1), computed on the next value.
    nextBinary ^ (nextBinary >> 1)
  }
}
/** Synchronizes a single Bool into this module's clock domain through a
  * `sync`-deep asynchronously reset shift register. A RawModule so that the
  * clock and (async) reset are explicit ports, wired up by the parent.
  *
  * @param sync number of synchronizer stages
  * @param desc name suggested for the internal shift register
  */
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
  val io = IO(new Bundle {
    val in = Input(Bool())
    val out = Output(Bool())
  })
  val clock = IO(Input(Clock()))
  val reset = IO(Input(AsyncReset()))

  withClockAndReset(clock, reset){
    io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
  }
}
/** Source (enqueue) half of an AsyncQueue. Lives entirely in the source
  * clock domain; only the storage wires and Gray-coded indices in `io.async`
  * cross to the sink domain.
  */
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
  override def desiredName = s"AsyncQueueSource_${gen.typeName}"

  val io = IO(new Bundle {
    // These come from the source domain
    val enq = Flipped(Decoupled(gen))
    // These cross to the sink clock domain
    val async = new AsyncBundle(gen, params)
  })

  val bits = params.bits
  // Cleared by the safe-reset logic below while the sink is resetting.
  val sink_ready = WireInit(true.B)
  val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
  // Gray-coded write index; cleared (not incremented) while the sink is not ready.
  val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
  // Sink's Gray-coded read index, synchronized into this clock domain.
  val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
  // Full when widx equals ridx with the top two Gray bits inverted.
  val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)

  // Gray-to-binary decode of the storage slot addressed by the write index.
  val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
  when (io.enq.fire) { mem(index) := io.enq.bits }

  val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
  io.enq.ready := ready_reg && sink_ready

  val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
  io.async.widx := widx_reg

  // `narrow` crossings send one muxed word; otherwise the whole array crosses.
  io.async.index match {
    case Some(index) => io.async.mem(0) := mem(index)
    case None => io.async.mem := mem
  }

  // Safe-mode handshake: exchange "index valid" pulses with the sink so each
  // side holds off until the other's reset has completed.
  io.async.safe.foreach { sio =>
    val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
    val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))

    val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
    val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
    source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
    source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
    sink_extend  .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
    sink_valid   .reset := reset.asAsyncReset

    source_valid_0.clock := clock
    source_valid_1.clock := clock
    sink_extend  .clock := clock
    sink_valid   .clock := clock

    source_valid_0.io.in := true.B
    source_valid_1.io.in := source_valid_0.io.out
    sio.widx_valid := source_valid_1.io.out
    sink_extend.io.in := sio.ridx_valid
    sink_valid.io.in := sink_extend.io.out
    sink_ready := sink_valid.io.out
    sio.source_reset_n := !reset.asBool

    // Assert that if there is stuff in the queue, then reset cannot happen
    //  Impossible to write because dequeue can occur on the receiving side,
    //  then reset allowed to happen, but write side cannot know that dequeue
    //  occurred.
    // TODO: write some sort of sanity check assertion for users
    // that denote don't reset when there is activity
    //    assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
    //    assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
  }
}
/** Sink (dequeue) half of an AsyncQueue. Lives entirely in the sink clock
  * domain; the source's write index and storage wires arrive through the
  * flipped `io.async` bundle.
  */
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
  override def desiredName = s"AsyncQueueSink_${gen.typeName}"

  val io = IO(new Bundle {
    // These come from the sink domain
    val deq = Decoupled(gen)
    // These cross to the source clock domain
    val async = Flipped(new AsyncBundle(gen, params))
  })

  val bits = params.bits
  // Cleared by the safe-reset logic below while the source is resetting.
  val source_ready = WireInit(true.B)
  // Gray-coded read index; cleared (not incremented) while the source is not ready.
  val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
  // Source's Gray-coded write index, synchronized into this clock domain.
  val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
  // Non-empty when the two Gray indices differ.
  val valid = source_ready && ridx =/= widx

  // The mux is safe because timing analysis ensures ridx has reached the register
  // On an ASIC, changes to the unread location cannot affect the selected value
  // On an FPGA, only one input changes at a time => mem updates don't cause glitches
  // The register only latches when the selected valued is not being written
  val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
  io.async.index.foreach { _ := index }
  // This register does not NEED to be reset, as its contents will not
  // be considered unless the asynchronously reset deq valid register is set.
  // It is possible that bits latches when the source domain is reset / has power cut
  // This is safe, because isolation gates brought mem low before the zeroed widx reached us
  val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
  io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))

  val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
  io.deq.valid := valid_reg && source_ready

  val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
  io.async.ridx := ridx_reg

  // Safe-mode handshake: mirror image of the source side's logic.
  io.async.safe.foreach { sio =>
    val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
    val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))

    val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
    val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
    sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
    sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
    source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
    source_valid .reset := reset.asAsyncReset

    sink_valid_0 .clock := clock
    sink_valid_1 .clock := clock
    source_extend.clock := clock
    source_valid .clock := clock

    sink_valid_0.io.in := true.B
    sink_valid_1.io.in := sink_valid_0.io.out
    sio.ridx_valid := sink_valid_1.io.out
    source_extend.io.in := sio.widx_valid
    source_valid.io.in := source_extend.io.out
    source_ready := source_valid.io.out
    sio.sink_reset_n := !reset.asBool

    // TODO: write some sort of sanity check assertion for users
    // that denote don't reset when there is activity
    //
    // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
    // val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
    // val reset_rise = !reset_and_extend_prev && reset_and_extend
    // val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
    // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
  }
}
/** Terminates an [[AsyncBundle]] crossing in the current (sink) clock domain,
  * returning a DecoupledIO dequeue interface.
  */
object FromAsyncBundle {
  /** Uses the same synchronizer depth the source side was built with. */
  def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)

  // Sometimes it makes sense for the sink to have different sync than the source.
  def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
    val sinkParams = x.params.copy(sync = sync)
    val sinkQueue = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), sinkParams))
    sinkQueue.io.async <> x
    sinkQueue.io.deq
  }
}
/** Wraps a ready/valid producer into an [[AsyncBundle]] crossing by
  * instantiating an AsyncQueueSource in the current (source) clock domain.
  */
object ToAsyncBundle {
  def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
    val sourceQueue = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
    sourceQueue.io.enq <> x
    sourceQueue.io.async
  }
}
/** A complete asynchronous FIFO: the source half is elaborated under the
  * enqueue clock/reset and the sink half under the dequeue clock/reset,
  * joined by the AsyncBundle crossing wires.
  */
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
  val io = IO(new CrossingIO(gen))
  val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
  val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }

  source.io.enq <> io.enq
  io.deq <> sink.io.deq
  sink.io.async <> source.io.async
}
| module AsyncValidSync_136( // @[AsyncQueue.scala:58:7]
  output io_out, // @[AsyncQueue.scala:59:14]
  input  clock, // @[AsyncQueue.scala:63:17]
  input  reset // @[AsyncQueue.scala:64:17]
);

  // Generated from AsyncValidSync with io.in tied high: passes a constant 1
  // through a 3-deep async-reset synchronizer shift register, so io_out
  // rises only after three clocks out of reset.
  wire io_in = 1'h1; // @[ShiftReg.scala:45:23]
  wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
  wire io_out_0; // @[AsyncQueue.scala:58:7]
  assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]

  // The synchronizer chain (width 1, depth 3, init 0).
  AsyncResetSynchronizerShiftReg_w1_d3_i0_157 io_out_source_valid_0 ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (reset),
    .io_q  (_io_out_WIRE)
  ); // @[ShiftReg.scala:45:23]
  assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
/** Control signals that travel through a PE alongside the data:
  * dataflow select, register-propagate select, and the right-shift amount
  * applied when reading out accumulators.
  */
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
  val dataflow = UInt(1.W) // TODO make this an Enum
  val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
  val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
/** A single multiply-accumulate unit: out_d = in_c.mac(in_a, in_b).
  * Kept as its own Module so synthesis can share one MAC across dataflows
  * (see the comment in PE). The exact `mac` semantics come from the
  * Arithmetic type class — presumably c + a*b; confirm in Arithmetic.scala.
  */
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
  import ev._
  val io = IO(new Bundle {
    val in_a = Input(inputType)
    val in_b = Input(inputType)
    val in_c = Input(cType)
    val out_d = Output(dType)
  })

  io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
/** A PE implementing a MAC operation; fully combinational pass-through of
  * control/id/last/valid, with two local accumulation registers c1/c2 whose
  * roles (compute vs. propagate) swap under `propagate` control. Supports
  * output-stationary and/or weight-stationary dataflows depending on `df`.
  */
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
                   (implicit ev: Arithmetic[T]) extends Module { // Debugging variables
  import ev._

  val io = IO(new Bundle {
    val in_a = Input(inputType)
    val in_b = Input(outputType)
    val in_d = Input(outputType)
    val out_a = Output(inputType)
    val out_b = Output(outputType)
    val out_c = Output(outputType)

    val in_control = Input(new PEControl(accType))
    val out_control = Output(new PEControl(accType))

    val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
    val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))

    val in_last = Input(Bool())
    val out_last = Output(Bool())

    val in_valid = Input(Bool())
    val out_valid = Output(Bool())

    val bad_dataflow = Output(Bool())
  })

  // WS keeps weights at input precision in the c registers; OS accumulates.
  val cType = if (df == Dataflow.WS) inputType else accType

  // When creating PEs that support multiple dataflows, the
  // elaboration/synthesis tools often fail to consolidate and de-duplicate
  // MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
  // module here which just performs a single MAC operation
  val mac_unit = Module(new MacUnit(inputType,
    if (df == Dataflow.WS) outputType else accType, outputType))

  val a = io.in_a
  val b = io.in_b
  val d = io.in_d
  val c1 = Reg(cType)
  val c2 = Reg(cType)
  val dataflow = io.in_control.dataflow
  val prop = io.in_control.propagate
  val shift = io.in_control.shift
  val id = io.in_id
  val last = io.in_last
  val valid = io.in_valid

  // Combinational pass-through to the neighboring PEs.
  io.out_a := a
  io.out_control.dataflow := dataflow
  io.out_control.propagate := prop
  io.out_control.shift := shift
  io.out_id := id
  io.out_last := last
  io.out_valid := valid

  mac_unit.io.in_a := a

  // Apply the read-out shift only on the cycle `prop` toggles.
  val last_s = RegEnable(prop, valid)
  val flip = last_s =/= prop
  val shift_offset = Mux(flip, shift, 0.U)

  // Which dataflow are we using?
  val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
  val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)

  // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
  val COMPUTE = 0.U(1.W)
  val PROPAGATE = 1.U(1.W)

  io.bad_dataflow := false.B
  when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
    // Output-stationary: one c register accumulates a*b while the other is
    // shifted/clipped out and reloaded from d; `prop` selects which is which.
    when(prop === PROPAGATE) {
      io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
      io.out_b := b
      mac_unit.io.in_b := b.asTypeOf(inputType)
      mac_unit.io.in_c := c2
      c2 := mac_unit.io.out_d
      c1 := d.withWidthOf(cType)
    }.otherwise {
      io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
      io.out_b := b
      mac_unit.io.in_b := b.asTypeOf(inputType)
      mac_unit.io.in_c := c1
      c1 := mac_unit.io.out_d
      c2 := d.withWidthOf(cType)
    }
  }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
    // Weight-stationary: one c register holds the weight multiplied into the
    // moving b partial sum while the other is propagated out and reloaded.
    when(prop === PROPAGATE) {
      io.out_c := c1
      mac_unit.io.in_b := c2.asTypeOf(inputType)
      mac_unit.io.in_c := b
      io.out_b := mac_unit.io.out_d
      c1 := d
    }.otherwise {
      io.out_c := c2
      mac_unit.io.in_b := c1.asTypeOf(inputType)
      mac_unit.io.in_c := b
      io.out_b := mac_unit.io.out_d
      c2 := d
    }
  }.otherwise {
    io.bad_dataflow := true.B
    //assert(false.B, "unknown dataflow")
    io.out_c := DontCare
    io.out_b := DontCare
    mac_unit.io.in_b := b.asTypeOf(inputType)
    mac_unit.io.in_c := c2
  }

  // Last-connect semantics: when invalid, freeze both registers and leave
  // the MAC operand muxes free (overrides the assignments above).
  when (!valid) {
    c1 := c1
    c2 := c2
    mac_unit.io.in_b := DontCare
    mac_unit.io.in_c := DontCare
  }
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
// Raw-bits container for an IEEE-754-style float with `expWidth` exponent
// bits and `sigWidth` significand bits (sign bit included in sigWidth's MSB
// position of `bits`, which is expWidth + sigWidth wide in total).
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
  val bits = UInt((expWidth + sigWidth).W)
  // Standard exponent bias for this exponent width: 2^(expWidth-1) - 1.
  val bias: Int = (1 << (expWidth-1)) - 1
}
// Placeholder signed-integer bundle used when arithmetic results are not
// needed; all DummySIntArithmetic ops return `dontCare`.
case class DummySInt(w: Int) extends Bundle {
  val bits = UInt(w.W)
  // Returns a fresh wire of this width driven to zero (a defined, ignorable value).
  def dontCare: DummySInt = {
    val o = Wire(new DummySInt(w))
    o.bits := 0.U
    o
  }
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
// The Arithmetic typeclass: provides an implicit conversion from a datatype T
// to its ArithmeticOps, enabling operator syntax (a * b, a.mac(...), etc.).
abstract class Arithmetic[T <: Data] {
  implicit def cast(t: T): ArithmeticOps[T]
}
// Operations every Gemmini-compatible datatype must support. `self` is the
// left-hand operand of each operation.
abstract class ArithmeticOps[T <: Data](self: T) {
  def *(t: T): T
  def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
  def +(t: T): T
  def -(t: T): T
  def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
  def >(t: T): Bool
  // Multiplicative identity (1) for this type.
  def identity: T
  // Resize to t's width by truncation/extension (no saturation).
  def withWidthOf(t: T): T
  def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
  def relu: T
  def zero: T
  // Most negative representable value of this type.
  def minimum: T
  // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
  // Each optional op returns Some((input port, output port)) of a decoupled
  // functional unit, or None if unsupported for this type.
  def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
  def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
  def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
  def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
// Arithmetic instances for the standard Gemmini datatypes: UInt, SInt,
// Float (via Berkeley hardfloat), and DummySInt.
object Arithmetic {
  // Unsigned integer arithmetic. Note: no saturation on *, +, -.
  implicit object UIntArithmetic extends Arithmetic[UInt] {
    override implicit def cast(self: UInt) = new ArithmeticOps(self) {
      override def *(t: UInt) = self * t
      override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
      override def +(t: UInt) = self + t
      override def -(t: UInt) = self - t
      // Round-half-to-even right shift (vxrm "rne"-style rounding).
      override def >>(u: UInt) = {
        // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
        // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
        // point_five: the bit shifted out at position u-1; zeros: any set bits below it.
        val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
        val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
        val ones_digit = self(u)
        // Round up when exactly at .5 and odd, or above .5.
        val r = point_five & (zeros | ones_digit)
        (self >> u).asUInt + r
      }
      override def >(t: UInt): Bool = self > t
      override def withWidthOf(t: UInt) = self.asTypeOf(t)
      // Saturates at the *signed* max of t's width (2^(w-1) - 1), then truncates.
      override def clippedToWidthOf(t: UInt) = {
        val sat = ((1 << (t.getWidth-1))-1).U
        Mux(self > sat, sat, self)(t.getWidth-1, 0)
      }
      override def relu: UInt = self
      override def zero: UInt = 0.U
      override def identity: UInt = 1.U
      override def minimum: UInt = 0.U
    }
  }
  // Signed integer arithmetic, including optional divider/sqrt/reciprocal
  // units implemented by routing through hardfloat floating-point hardware.
  implicit object SIntArithmetic extends Arithmetic[SInt] {
    override implicit def cast(self: SInt) = new ArithmeticOps(self) {
      override def *(t: SInt) = self * t
      override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
      override def +(t: SInt) = self + t
      override def -(t: SInt) = self - t
      // Round-half-to-even right shift, as in the UInt instance but signed.
      override def >>(u: UInt) = {
        // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
        // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
        val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
        val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
        val ones_digit = self(u)
        val r = (point_five & (zeros | ones_digit)).asBool
        (self >> u).asSInt + Mux(r, 1.S, 0.S)
      }
      override def >(t: SInt): Bool = self > t
      // Truncate when narrowing; manually sign-extend when widening.
      override def withWidthOf(t: SInt) = {
        if (self.getWidth >= t.getWidth)
          self(t.getWidth-1, 0).asSInt
        else {
          val sign_bits = t.getWidth - self.getWidth
          val sign = self(self.getWidth-1)
          Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
        }
      }
      // Saturate to t's signed range [-2^(w-1), 2^(w-1)-1], then truncate.
      override def clippedToWidthOf(t: SInt): SInt = {
        val maxsat = ((1 << (t.getWidth-1))-1).S
        val minsat = (-(1 << (t.getWidth-1))).S
        MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
      }
      override def relu: SInt = Mux(self >= 0.S, self, 0.S)
      override def zero: SInt = 0.S
      override def identity: SInt = 1.S
      override def minimum: SInt = (-(1 << (self.getWidth-1))).S
      // Decoupled integer divider built from a hardfloat FP divider:
      // convert self and the unsigned denominator to recoded floats, divide,
      // and convert back to SInt (round toward zero).
      override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
        // TODO this uses a floating point divider, but we should use an integer divider instead
        val input = Wire(Decoupled(denom_t.cloneType))
        val output = Wire(Decoupled(self.cloneType))
        // We translate our integer to floating-point form so that we can use the hardfloat divider
        // Width chosen so every value of self is exactly representable.
        val expWidth = log2Up(self.getWidth) + 1
        val sigWidth = self.getWidth
        def sin_to_float(x: SInt) = {
          val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
          in_to_rec_fn.io.signedIn := true.B
          in_to_rec_fn.io.in := x.asUInt
          in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
          in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
          in_to_rec_fn.io.out
        }
        def uin_to_float(x: UInt) = {
          val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
          in_to_rec_fn.io.signedIn := false.B
          in_to_rec_fn.io.in := x
          in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
          in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
          in_to_rec_fn.io.out
        }
        def float_to_in(x: UInt) = {
          val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
          rec_fn_to_in.io.signedOut := true.B
          rec_fn_to_in.io.in := x
          rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
          rec_fn_to_in.io.out.asSInt
        }
        val self_rec = sin_to_float(self)
        val denom_rec = uin_to_float(input.bits)
        // Instantiate the hardloat divider
        val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
        input.ready := divider.io.inReady
        divider.io.inValid := input.valid
        divider.io.sqrtOp := false.B
        divider.io.a := self_rec
        divider.io.b := denom_rec
        divider.io.roundingMode := consts.round_minMag
        divider.io.detectTininess := consts.tininess_afterRounding
        output.valid := divider.io.outValid_div
        output.bits := float_to_in(divider.io.out)
        // The consumer must always be ready when a result appears (no buffering).
        assert(!output.valid || output.ready)
        Some((input, output))
      }
      // Decoupled integer square root, same FP-roundtrip strategy as divider.
      // The input port carries no data (UInt(0.W)); it only triggers the op.
      override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
        // TODO this uses a floating point divider, but we should use an integer divider instead
        val input = Wire(Decoupled(UInt(0.W)))
        val output = Wire(Decoupled(self.cloneType))
        input.bits := DontCare
        // We translate our integer to floating-point form so that we can use the hardfloat divider
        val expWidth = log2Up(self.getWidth) + 1
        val sigWidth = self.getWidth
        def in_to_float(x: SInt) = {
          val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
          in_to_rec_fn.io.signedIn := true.B
          in_to_rec_fn.io.in := x.asUInt
          in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
          in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
          in_to_rec_fn.io.out
        }
        def float_to_in(x: UInt) = {
          val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
          rec_fn_to_in.io.signedOut := true.B
          rec_fn_to_in.io.in := x
          rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
          rec_fn_to_in.io.out.asSInt
        }
        val self_rec = in_to_float(self)
        // Instantiate the hardloat sqrt
        val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
        input.ready := sqrter.io.inReady
        sqrter.io.inValid := input.valid
        sqrter.io.sqrtOp := true.B
        sqrter.io.a := self_rec
        sqrter.io.b := DontCare
        sqrter.io.roundingMode := consts.round_minMag
        sqrter.io.detectTininess := consts.tininess_afterRounding
        output.valid := sqrter.io.outValid_sqrt
        output.bits := float_to_in(sqrter.io.out)
        assert(!output.valid || output.ready)
        Some((input, output))
      }
      // Reciprocal (1/self) producing a *float* result of type u; only
      // supported when u is a Float bundle.
      override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
        case Float(expWidth, sigWidth) =>
          val input = Wire(Decoupled(UInt(0.W)))
          val output = Wire(Decoupled(u.cloneType))
          input.bits := DontCare
          // We translate our integer to floating-point form so that we can use the hardfloat divider
          def in_to_float(x: SInt) = {
            val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
            in_to_rec_fn.io.signedIn := true.B
            in_to_rec_fn.io.in := x.asUInt
            in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
            in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
            in_to_rec_fn.io.out
          }
          val self_rec = in_to_float(self)
          val one_rec = in_to_float(1.S)
          // Instantiate the hardloat divider
          val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
          input.ready := divider.io.inReady
          divider.io.inValid := input.valid
          divider.io.sqrtOp := false.B
          divider.io.a := one_rec
          divider.io.b := self_rec
          divider.io.roundingMode := consts.round_near_even
          divider.io.detectTininess := consts.tininess_afterRounding
          output.valid := divider.io.outValid_div
          output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
          assert(!output.valid || output.ready)
          Some((input, output))
        case _ => None
      }
      // Multiply self by a precomputed float reciprocal; falls back to
      // returning self unchanged for non-Float reciprocal types.
      override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
        case recip @ Float(expWidth, sigWidth) =>
          def in_to_float(x: SInt) = {
            val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
            in_to_rec_fn.io.signedIn := true.B
            in_to_rec_fn.io.in := x.asUInt
            in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
            in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
            in_to_rec_fn.io.out
          }
          def float_to_in(x: UInt) = {
            val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
            rec_fn_to_in.io.signedOut := true.B
            rec_fn_to_in.io.in := x
            rec_fn_to_in.io.roundingMode := consts.round_minMag
            rec_fn_to_in.io.out.asSInt
          }
          val self_rec = in_to_float(self)
          val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
          // Instantiate the hardloat divider
          val muladder = Module(new MulRecFN(expWidth, sigWidth))
          muladder.io.roundingMode := consts.round_near_even
          muladder.io.detectTininess := consts.tininess_afterRounding
          muladder.io.a := self_rec
          muladder.io.b := reciprocal_rec
          float_to_in(muladder.io.out)
        case _ => self
      }
    }
  }
  // Floating-point arithmetic via Berkeley hardfloat. Values are stored in
  // standard IEEE format and recoded at each operation's boundary.
  implicit object FloatArithmetic extends Arithmetic[Float] {
    // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
    override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
      // self * t, with t first resized to self's format.
      override def *(t: Float): Float = {
        val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
        val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
        val t_resizer =  Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
        t_resizer.io.in := t_rec
        t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        t_resizer.io.detectTininess := consts.tininess_afterRounding
        val t_rec_resized = t_resizer.io.out
        val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
        muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        muladder.io.detectTininess := consts.tininess_afterRounding
        muladder.io.a := self_rec
        muladder.io.b := t_rec_resized
        val out = Wire(Float(self.expWidth, self.sigWidth))
        out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
        out
      }
      // Fused multiply-add: m1 * m2 + self, computed in self's format.
      override def mac(m1: Float, m2: Float): Float = {
        // Recode all operands
        val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
        val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
        val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
        // Resize m1 to self's width
        val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
        m1_resizer.io.in := m1_rec
        m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        m1_resizer.io.detectTininess := consts.tininess_afterRounding
        val m1_rec_resized = m1_resizer.io.out
        // Resize m2 to self's width
        val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
        m2_resizer.io.in := m2_rec
        m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        m2_resizer.io.detectTininess := consts.tininess_afterRounding
        val m2_rec_resized = m2_resizer.io.out
        // Perform multiply-add
        val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
        muladder.io.op := 0.U
        muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        muladder.io.detectTininess := consts.tininess_afterRounding
        muladder.io.a := m1_rec_resized
        muladder.io.b := m2_rec_resized
        muladder.io.c := self_rec
        // Convert result to standard format // TODO remove these intermediate recodings
        val out = Wire(Float(self.expWidth, self.sigWidth))
        out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
        out
      }
      // self + t, implemented as t*1 + self on the fused multiply-adder.
      override def +(t: Float): Float = {
        require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
        // Recode all operands
        val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
        val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
        // Generate 1 as a float
        val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
        in_to_rec_fn.io.signedIn := false.B
        in_to_rec_fn.io.in := 1.U
        in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
        val one_rec = in_to_rec_fn.io.out
        // Resize t
        val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
        t_resizer.io.in := t_rec
        t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        t_resizer.io.detectTininess := consts.tininess_afterRounding
        val t_rec_resized = t_resizer.io.out
        // Perform addition
        val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
        muladder.io.op := 0.U
        muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        muladder.io.detectTininess := consts.tininess_afterRounding
        muladder.io.a := t_rec_resized
        muladder.io.b := one_rec
        muladder.io.c := self_rec
        val result = Wire(Float(self.expWidth, self.sigWidth))
        result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
        result
      }
      // self - t: flip t's sign bit and add.
      override def -(t: Float): Float = {
        val t_sgn = t.bits(t.getWidth-1)
        val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
        self + neg_t
      }
      // Scale by 2^(-u), implemented as multiplication by an exactly
      // representable power of two (so no precision is lost in the scale factor).
      override def >>(u: UInt): Float = {
        // Recode self
        val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
        // Get 2^(-u) as a recoded float
        val shift_exp = Wire(UInt(self.expWidth.W))
        shift_exp := self.bias.U - u
        val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
        val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
        assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
        // Multiply self and 2^(-u)
        val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
        muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        muladder.io.detectTininess := consts.tininess_afterRounding
        muladder.io.a := self_rec
        muladder.io.b := shift_rec
        val result = Wire(Float(self.expWidth, self.sigWidth))
        result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
        result
      }
      override def >(t: Float): Bool = {
        // Recode all operands
        val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
        val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
        // Resize t to self's width
        val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
        t_resizer.io.in := t_rec
        t_resizer.io.roundingMode := consts.round_near_even
        t_resizer.io.detectTininess := consts.tininess_afterRounding
        val t_rec_resized = t_resizer.io.out
        val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
        comparator.io.a := self_rec
        comparator.io.b := t_rec_resized
        comparator.io.signaling := false.B
        comparator.io.gt
      }
      // Rounding format conversion to t's exp/sig widths.
      override def withWidthOf(t: Float): Float = {
        val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
        val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
        resizer.io.in := self_rec
        resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        resizer.io.detectTininess := consts.tininess_afterRounding
        val result = Wire(Float(t.expWidth, t.sigWidth))
        result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
        result
      }
      override def clippedToWidthOf(t: Float): Float = {
        // TODO check for overflow. Right now, we just assume that overflow doesn't happen
        val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
        val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
        resizer.io.in := self_rec
        resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        resizer.io.detectTininess := consts.tininess_afterRounding
        val result = Wire(Float(t.expWidth, t.sigWidth))
        result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
        result
      }
      // max(self, 0): negative non-zero values map to +0; -0 passes through as 0.
      override def relu: Float = {
        val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
        val result = Wire(Float(self.expWidth, self.sigWidth))
        result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
        result
      }
      override def zero: Float = 0.U.asTypeOf(self)
      // identity = 1.0 (sign 0, exponent = bias, significand 0).
      override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
      // minimum = negative infinity (sign 1, all-ones exponent, significand 0).
      override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
    }
  }
  // No-op arithmetic for DummySInt: every operation yields dontCare / false.
  implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
    override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
      override def *(t: DummySInt) = self.dontCare
      override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
      override def +(t: DummySInt) = self.dontCare
      override def -(t: DummySInt) = self.dontCare
      override def >>(t: UInt) = self.dontCare
      override def >(t: DummySInt): Bool = false.B
      override def identity = self.dontCare
      override def withWidthOf(t: DummySInt) = self.dontCare
      override def clippedToWidthOf(t: DummySInt) = self.dontCare
      override def relu = self.dontCare
      override def zero = self.dontCare
      override def minimum: DummySInt = self.dontCare
    }
  }
}
| module PE_342( // @[PE.scala:31:7]
input clock, // @[PE.scala:31:7]
input reset, // @[PE.scala:31:7]
input [7:0] io_in_a, // @[PE.scala:35:14]
input [19:0] io_in_b, // @[PE.scala:35:14]
input [19:0] io_in_d, // @[PE.scala:35:14]
output [7:0] io_out_a, // @[PE.scala:35:14]
output [19:0] io_out_b, // @[PE.scala:35:14]
output [19:0] io_out_c, // @[PE.scala:35:14]
input io_in_control_dataflow, // @[PE.scala:35:14]
input io_in_control_propagate, // @[PE.scala:35:14]
input [4:0] io_in_control_shift, // @[PE.scala:35:14]
output io_out_control_dataflow, // @[PE.scala:35:14]
output io_out_control_propagate, // @[PE.scala:35:14]
output [4:0] io_out_control_shift, // @[PE.scala:35:14]
input [2:0] io_in_id, // @[PE.scala:35:14]
output [2:0] io_out_id, // @[PE.scala:35:14]
input io_in_last, // @[PE.scala:35:14]
output io_out_last, // @[PE.scala:35:14]
input io_in_valid, // @[PE.scala:35:14]
output io_out_valid, // @[PE.scala:35:14]
output io_bad_dataflow // @[PE.scala:35:14]
);
wire [19:0] _mac_unit_io_out_d; // @[PE.scala:64:24]
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7]
wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7]
wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7]
wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7]
wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7]
wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7]
wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7]
wire io_in_last_0 = io_in_last; // @[PE.scala:31:7]
wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7]
wire io_bad_dataflow_0 = 1'h0; // @[PE.scala:31:7]
wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7]
wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37]
wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37]
wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35]
wire [19:0] c1_lo_1 = io_in_d_0; // @[PE.scala:31:7]
wire [19:0] c2_lo_1 = io_in_d_0; // @[PE.scala:31:7]
wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7]
wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7]
wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7]
wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7]
wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7]
wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7]
wire [19:0] io_out_b_0; // @[PE.scala:31:7]
wire [19:0] io_out_c_0; // @[PE.scala:31:7]
reg [31:0] c1; // @[PE.scala:70:15]
wire [31:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15]
wire [31:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38]
reg [31:0] c2; // @[PE.scala:71:15]
wire [31:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15]
wire [31:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38]
reg last_s; // @[PE.scala:89:25]
wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21]
wire [4:0] shift_offset = flip ? io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25]
wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25]
wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32]
wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32]
wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25]
wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53]
wire [31:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15]
wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}]
wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25]
wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27]
wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27]
wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_7 = _io_out_c_zeros_T_1 & _io_out_c_zeros_T_6; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}]
wire [31:0] _GEN_2 = {27'h0, shift_offset}; // @[PE.scala:91:25]
wire [31:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15]
wire [31:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30]
wire [31:0] _io_out_c_T; // @[Arithmetic.scala:107:15]
assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33]
wire [32:0] _io_out_c_T_2 = {_io_out_c_T[31], _io_out_c_T} + {{31{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}]
wire [31:0] _io_out_c_T_3 = _io_out_c_T_2[31:0]; // @[Arithmetic.scala:107:28]
wire [31:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28]
wire _io_out_c_T_5 = $signed(_io_out_c_T_4) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33]
wire _io_out_c_T_6 = $signed(_io_out_c_T_4) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60]
wire [31:0] _io_out_c_T_7 = _io_out_c_T_6 ? 32'hFFF80000 : _io_out_c_T_4; // @[Mux.scala:126:16]
wire [31:0] _io_out_c_T_8 = _io_out_c_T_5 ? 32'h7FFFF : _io_out_c_T_7; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_9 = _io_out_c_T_8[19:0]; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37]
wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37]
wire c1_sign = io_in_d_0[19]; // @[PE.scala:31:7]
wire c2_sign = io_in_d_0[19]; // @[PE.scala:31:7]
wire [1:0] _GEN_4 = {2{c1_sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] c1_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign c1_lo_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign c1_lo_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign c1_hi_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign c1_hi_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [2:0] c1_lo_lo = {c1_lo_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c1_lo_hi = {c1_lo_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c1_lo = {c1_lo_hi, c1_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] c1_hi_lo = {c1_hi_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c1_hi_hi = {c1_hi_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c1_hi = {c1_hi_hi, c1_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] _c1_T = {c1_hi, c1_lo}; // @[Arithmetic.scala:118:18]
wire [31:0] _c1_T_1 = {_c1_T, c1_lo_1}; // @[Arithmetic.scala:118:{14,18}]
wire [31:0] _c1_T_2 = _c1_T_1; // @[Arithmetic.scala:118:{14,61}]
wire [31:0] _c1_WIRE = _c1_T_2; // @[Arithmetic.scala:118:61]
wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53]
wire [31:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15]
wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}]
wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_16 = _io_out_c_zeros_T_10 & _io_out_c_zeros_T_15; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}]
wire [31:0] _GEN_5 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15]
wire [31:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T_1 = _GEN_5; // @[Arithmetic.scala:103:30]
wire [31:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15]
assign _io_out_c_T_11 = _GEN_5; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33]
wire [32:0] _io_out_c_T_13 = {_io_out_c_T_11[31], _io_out_c_T_11} + {{31{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}]
wire [31:0] _io_out_c_T_14 = _io_out_c_T_13[31:0]; // @[Arithmetic.scala:107:28]
wire [31:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28]
wire _io_out_c_T_16 = $signed(_io_out_c_T_15) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33]
wire _io_out_c_T_17 = $signed(_io_out_c_T_15) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60]
wire [31:0] _io_out_c_T_18 = _io_out_c_T_17 ? 32'hFFF80000 : _io_out_c_T_15; // @[Mux.scala:126:16]
wire [31:0] _io_out_c_T_19 = _io_out_c_T_16 ? 32'h7FFFF : _io_out_c_T_18; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_20 = _io_out_c_T_19[19:0]; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37]
wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37]
wire [1:0] _GEN_6 = {2{c2_sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] c2_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign c2_lo_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign c2_lo_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign c2_hi_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign c2_hi_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [2:0] c2_lo_lo = {c2_lo_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c2_lo_hi = {c2_lo_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c2_lo = {c2_lo_hi, c2_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] c2_hi_lo = {c2_hi_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c2_hi_hi = {c2_hi_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c2_hi = {c2_hi_hi, c2_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] _c2_T = {c2_hi, c2_lo}; // @[Arithmetic.scala:118:18]
wire [31:0] _c2_T_1 = {_c2_T, c2_lo_1}; // @[Arithmetic.scala:118:{14,18}]
wire [31:0] _c2_T_2 = _c2_T_1; // @[Arithmetic.scala:118:{14,61}]
wire [31:0] _c2_WIRE = _c2_T_2; // @[Arithmetic.scala:118:61]
wire [31:0] _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38]
wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5[7:0]; // @[PE.scala:121:38]
wire [31:0] _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38]
wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7[7:0]; // @[PE.scala:127:38]
assign io_out_c_0 = io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? c1[19:0] : c2[19:0]) : io_in_control_propagate_0 ? _io_out_c_T_10 : _io_out_c_T_21; // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :104:16, :111:16, :118:101, :119:30, :120:16, :126:16]
assign io_out_b_0 = io_in_control_dataflow_0 ? _mac_unit_io_out_d : io_in_b_0; // @[PE.scala:31:7, :64:24, :102:95, :103:30, :118:101]
wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35]
wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35]
wire [31:0] _GEN_7 = {{12{io_in_d_0[19]}}, io_in_d_0}; // @[PE.scala:31:7, :124:10]
wire [31:0] _GEN_8 = {{12{_mac_unit_io_out_d[19]}}, _mac_unit_io_out_d}; // @[PE.scala:64:24, :108:10]
always @(posedge clock) begin // @[PE.scala:31:7]
if (io_in_valid_0) begin // @[PE.scala:31:7]
if (io_in_control_dataflow_0) begin // @[PE.scala:31:7]
if (io_in_control_dataflow_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :70:15, :118:101, :119:30, :124:10]
c1 <= _GEN_7; // @[PE.scala:70:15, :124:10]
if (~io_in_control_dataflow_0 | io_in_control_propagate_0) begin // @[PE.scala:31:7, :71:15, :118:101, :119:30]
end
else // @[PE.scala:71:15, :118:101, :119:30]
c2 <= _GEN_7; // @[PE.scala:71:15, :124:10]
end
else begin // @[PE.scala:31:7]
c1 <= io_in_control_propagate_0 ? _c1_WIRE : _GEN_8; // @[PE.scala:31:7, :70:15, :103:30, :108:10, :109:10, :115:10]
c2 <= io_in_control_propagate_0 ? _GEN_8 : _c2_WIRE; // @[PE.scala:31:7, :71:15, :103:30, :108:10, :116:10]
end
last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25]
end
always @(posedge)
MacUnit_86 mac_unit ( // @[PE.scala:64:24]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0), // @[PE.scala:31:7]
.io_in_b (io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3) : io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE : _mac_unit_io_in_b_WIRE_1), // @[PE.scala:31:7, :102:95, :103:30, :106:{24,37}, :113:{24,37}, :118:101, :119:30, :121:{24,38}, :127:{24,38}]
.io_in_c (io_in_control_dataflow_0 ? {{12{io_in_b_0[19]}}, io_in_b_0} : io_in_control_propagate_0 ? c2 : c1), // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :107:24, :114:24, :118:101, :122:24]
.io_out_d (_mac_unit_io_out_d)
); // @[PE.scala:64:24]
assign io_out_a = io_out_a_0; // @[PE.scala:31:7]
assign io_out_b = io_out_b_0; // @[PE.scala:31:7]
assign io_out_c = io_out_c_0; // @[PE.scala:31:7]
assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7]
assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7]
assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7]
assign io_out_id = io_out_id_0; // @[PE.scala:31:7]
assign io_out_last = io_out_last_0; // @[PE.scala:31:7]
assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7]
assign io_bad_dataflow = io_bad_dataflow_0; // @[PE.scala:31:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
/** Constructor arguments for a TileLink monitor: the diplomatic edge whose traffic is checked. */
case class TLMonitorArgs(edge: TLEdge)
/** Base class for TileLink protocol monitors.
  *
  * Snoops one full TileLink bundle as a pure input and invokes [[legalize]]
  * during construction, so subclasses elaborate their checking hardware
  * against `io.in` at module-build time.
  */
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
  // Monitor-only view of the link: every channel is an Input (never driven here).
  val io = IO(new Bundle {
    val in = Input(new TLBundle(args.edge.bundle))
  })
  /** Subclasses elaborate their protocol checks on `bundle` here. */
  def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
  legalize(io.in, args.edge, reset)
}
/** Optionally splices a monitored ephemeral node in front of `node`. */
object TLMonitor {
  def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode =
    if (!enable) node
    else EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
  /** Elaborates per-beat format checks for the 'A' channel.
    *
    * Verifies the opcode is a legal 'A' message, the source/size/address
    * combination is permitted by the diplomatically negotiated parameters,
    * and the per-opcode rules on param/mask/corrupt hold.  The caller gates
    * these checks with `when (bundle.a.valid)`.
    *
    * @param bundle the 'A' channel payload under inspection
    * @param edge   diplomatic edge parameters used to judge legality
    */
  def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
    //switch this flag to turn on diplomacy in error messages
    def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
    monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
    // Reuse these subexpressions to save some firrtl lines
    val source_ok = edge.client.contains(bundle.source)
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val mask = edge.full_mask(bundle)
    monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    // The monitor doesn't check for acquire T vs acquire B; it assumes acquire B implies acquire T and only checks for acquire B
    //TODO: check for acquireT?
    when (bundle.opcode === TLMessages.AcquireBlock) {
      monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
      monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
      monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
      monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.AcquirePerm) {
      monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
      monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
      monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
      monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
      monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.Get) {
      monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
      monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.PutFullData) {
      monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.PutPartialData) {
      monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
      // PutPartial may mask off lanes, but only lanes inside the full mask.
      monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.ArithmeticData) {
      monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
      monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.LogicalData) {
      monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
      monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.Hint) {
      monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
      monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
    }
  }
  /** Elaborates per-beat format checks for the 'B' channel.
    *
    * 'B' travels slave-to-master, so Probe legality is checked with `assume`
    * (a constraint on the other side in formal flows) while forwarded
    * requests use `monAssert`.  The caller gates these checks with
    * `when (bundle.b.valid)`.
    */
  def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
    monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
    monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
    // Reuse these subexpressions to save some firrtl lines
    val address_ok = edge.manager.containsSafe(edge.address(bundle))
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val mask = edge.full_mask(bundle)
    // The source must equal the first source ID of the client that owns it.
    val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
    when (bundle.opcode === TLMessages.Probe) {
      assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
      assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
      assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
      assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
      assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
      assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
      assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.Get) {
      monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
      monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.PutFullData) {
      monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
      monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.PutPartialData) {
      monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
      monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
      monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.ArithmeticData) {
      monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
      monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
      monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.LogicalData) {
      monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
      monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
      monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.Hint) {
      monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
      monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
      monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
    }
  }
  /** Elaborates per-beat format checks for the 'C' channel
    * (ProbeAck/Release responses and forwarded acks).  The caller gates
    * these checks with `when (bundle.c.valid)`.
    */
  def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
    monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
    // Reuse these subexpressions to save some firrtl lines
    val source_ok = edge.client.contains(bundle.source)
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val address_ok = edge.manager.containsSafe(edge.address(bundle))
    monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
    when (bundle.opcode === TLMessages.ProbeAck) {
      monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
      monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.ProbeAckData) {
      monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
    }
    when (bundle.opcode === TLMessages.Release) {
      monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
      monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
      monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.ReleaseData) {
      monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
      monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
    }
    when (bundle.opcode === TLMessages.AccessAck) {
      monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
      monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.AccessAckData) {
      monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
    }
    when (bundle.opcode === TLMessages.HintAck) {
      monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
      monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
    }
  }
  /** Elaborates per-beat format checks for the 'D' channel (responses).
    *
    * 'D' is driven by the slave, so all checks use `assume`.  Denial is only
    * tolerated if the manager declared `mayDenyPut`/`mayDenyGet`.  The caller
    * gates these checks with `when (bundle.d.valid)`.
    */
  def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
    assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
    val source_ok = edge.client.contains(bundle.source)
    val sink_ok = bundle.sink < edge.manager.endSinkId.U
    // Whether the manager is permitted to deny writes / reads at all.
    val deny_put_ok = edge.manager.mayDenyPut.B
    val deny_get_ok = edge.manager.mayDenyGet.B
    when (bundle.opcode === TLMessages.ReleaseAck) {
      assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
      assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      // (sic: "ReleaseeAck" typo is in the original diagnostic message)
      assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
      assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
      assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
    }
    when (bundle.opcode === TLMessages.Grant) {
      assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
      assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
      assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
      assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
      assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
      assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
      assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
    }
    when (bundle.opcode === TLMessages.GrantData) {
      assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
      assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
      assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
      assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
      assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
      // A denied data-bearing response must also mark its payload corrupt.
      assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
      assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
    }
    when (bundle.opcode === TLMessages.AccessAck) {
      assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
      // size is ignored
      assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
      assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
      assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
    }
    when (bundle.opcode === TLMessages.AccessAckData) {
      assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
      // size is ignored
      assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
      assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
      assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
    }
    when (bundle.opcode === TLMessages.HintAck) {
      assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
      // size is ignored
      assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
      assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
      assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
    }
  }
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
  /** Formal-verification setup relating 'A' requests to 'D' responses for one
    * symbolically chosen source ID: no request may be issued while a response
    * for the same source is pending, and every response must match an
    * accepted request's source, size, and opcode class.
    */
  def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
    // Symbolic variable
    val sym_source = Wire(UInt(edge.client.endSourceId.W))
    // TODO: Connect sym_source to a fixed value for simulation and to a
    // free wire in formal
    sym_source := 0.U
    // Type casting Int to UInt
    val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
    maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
    val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
    sym_source_d := sym_source
    // These will be constraints for FV setup
    Property(
      MonitorDirection.Monitor,
      (sym_source === sym_source_d),
      "sym_source should remain stable",
      PropertyClass.Default)
    Property(
      MonitorDirection.Monitor,
      (sym_source <= maxSourceId),
      "sym_source should take legal value",
      PropertyClass.Default)
    // Tracks whether a response for sym_source is still outstanding, plus the
    // opcode/size of the request that opened it.
    val my_resp_pend = RegInit(false.B)
    val my_opcode = Reg(UInt())
    val my_size = Reg(UInt())
    val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
    val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
    val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
    val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
    val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
    val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
    when (my_set_resp_pend) {
      my_resp_pend := true.B
    } .elsewhen (my_clr_resp_pend) {
      my_resp_pend := false.B
    }
    when (my_a_first_beat) {
      my_opcode := bundle.a.bits.opcode
      my_size := bundle.a.bits.size
    }
    // Bypass the registers when request and response overlap in one cycle.
    val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
    val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
    val my_resp_opcode_legal = Wire(Bool())
    when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
      (my_resp_opcode === TLMessages.LogicalData)) {
      my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
    } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
      my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
    } .otherwise {
      my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
    }
    monAssert (IfThen(my_resp_pend, !my_a_first_beat),
      "Request message should not be sent with a source ID, for which a response message" +
      "is already pending (not received until current cycle) for a prior request message" +
      "with the same source ID" + extra)
    assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
      "Response message should be accepted with a source ID only if a request message with the" +
      "same source ID has been accepted or is being accepted in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
      "Response message should be sent with a source ID only if a request message with the" +
      "same source ID has been accepted or is being sent in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
      "If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
      "message" + extra)
    assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
      "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
      "request message" + extra)
  }
  /** Checks that a multibeat 'C' channel burst keeps its control fields
    * (opcode/param/size/source/address) constant across all beats:
    * the first accepted beat latches the fields, and every later valid
    * beat is compared against the latched values.
    */
  def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
    val c_first = edge.first(c.bits, c.fire)
    // Control fields captured on the first beat of the current burst.
    val opcode = Reg(UInt())
    val param = Reg(UInt())
    val size = Reg(UInt())
    val source = Reg(UInt())
    val address = Reg(UInt())
    // Every beat after the first must match the captured values.
    when (c.valid && !c_first) {
      monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
      monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
      monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
      monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra)
    }
    // Latch the fields when the first beat is accepted.
    when (c.fire && c_first) {
      opcode := c.bits.opcode
      param := c.bits.param
      size := c.bits.size
      source := c.bits.source
      address := c.bits.address
    }
  }
  /** Checks that a multibeat 'D' channel burst keeps its control fields
    * (opcode/param/size/source/sink/denied) constant across all beats.
    * Uses `assume` (not monAssert) because D carries responses produced
    * by the other side of the monitored edge.
    */
  def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
    val d_first = edge.first(d.bits, d.fire)
    // Control fields captured on the first beat of the current burst.
    val opcode = Reg(UInt())
    val param = Reg(UInt())
    val size = Reg(UInt())
    val source = Reg(UInt())
    val sink = Reg(UInt())
    val denied = Reg(Bool())
    // Every beat after the first must match the captured values.
    when (d.valid && !d_first) {
      assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
      assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
      assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
      assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra)
    }
    // Latch the fields when the first beat is accepted.
    when (d.fire && d_first) {
      opcode := d.bits.opcode
      param := d.bits.param
      size := d.bits.size
      source := d.bits.source
      sink := d.bits.sink
      denied := d.bits.denied
    }
  }
  /** Applies the multibeat-stability checks to every channel present on
    * this edge. B and C only exist when the edge supports Probe/AcquireB.
    */
  def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
    legalizeMultibeatA(bundle.a, edge)
    legalizeMultibeatD(bundle.d, edge)
    if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
      legalizeMultibeatB(bundle.b, edge)
      legalizeMultibeatC(bundle.c, edge)
    }
  }
  //This is left in for almond which doesn't adhere to the tilelink protocol
  /** Legacy A->D source-ID tracker: one in-flight bit per source ID, set on
    * an accepted A request and cleared on the matching D response. Does not
    * check response opcode/size correspondence (see legalizeADSource).
    */
  @deprecated("Use legalizeADSource instead if possible","")
  def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
    // One bit per source ID: set while an A request awaits its D response.
    val inflight = RegInit(0.U(edge.client.endSourceId.W))
    val a_first = edge.first(bundle.a.bits, bundle.a.fire)
    val d_first = edge.first(bundle.d.bits, bundle.d.fire)
    val a_set = WireInit(0.U(edge.client.endSourceId.W))
    when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
      a_set := UIntToOH(bundle.a.bits.source)
      assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
    }
    val d_clr = WireInit(0.U(edge.client.endSourceId.W))
    // ReleaseAck answers a C-channel Release, so it must not clear A->D state.
    val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
    when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
      d_clr := UIntToOH(bundle.d.bits.source)
      assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
    }
    if (edge.manager.minLatency > 0) {
      assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
    }
    inflight := (inflight | a_set) & ~d_clr
    // Watchdog: reset on any A/D beat; fires if traffic stalls past `limit`.
    val watchdog = RegInit(0.U(32.W))
    val limit = PlusArg("tilelink_timeout",
      docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
    assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
    watchdog := watchdog + 1.U
    when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
  }
  /** Strict A->D source-ID tracker. Per source ID it records an in-flight
    * bit plus the request's opcode and size (each stored shifted left by 1
    * with a low "set" bit so the value 0 always means "unset"), and checks
    * that each D response matches the opcode/size of its A request, that
    * IDs are not re-used while in flight, and that traffic does not stall.
    */
  def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
    val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
    val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
    val log_a_size_bus_size = log2Ceil(a_size_bus_size)
    def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
    // Per-source-ID state: one in-flight bit, plus packed opcode/size slots.
    val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
    inflight.suggestName("inflight")
    val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
    inflight_opcodes.suggestName("inflight_opcodes")
    val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
    inflight_sizes.suggestName("inflight_sizes")
    val a_first = edge.first(bundle.a.bits, bundle.a.fire)
    a_first.suggestName("a_first")
    val d_first = edge.first(bundle.d.bits, bundle.d.fire)
    d_first.suggestName("d_first")
    val a_set = WireInit(0.U(edge.client.endSourceId.W))
    val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
    a_set.suggestName("a_set")
    a_set_wo_ready.suggestName("a_set_wo_ready")
    val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
    a_opcodes_set.suggestName("a_opcodes_set")
    val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
    a_sizes_set.suggestName("a_sizes_set")
    // Recover the stored opcode/size for the responding source ID
    // (the final >> 1 drops the low "set" marker bit).
    val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
    a_opcode_lookup.suggestName("a_opcode_lookup")
    a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
    val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
    a_size_lookup.suggestName("a_size_lookup")
    a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
    // Legal D opcodes indexed by A opcode; second table covers the
    // Grant/GrantData alternative for AcquireBlock/AcquirePerm.
    val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
    val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
    val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
    a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
    val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
    a_sizes_set_interm.suggestName("a_sizes_set_interm")
    when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
      a_set_wo_ready := UIntToOH(bundle.a.bits.source)
    }
    // On an accepted first A beat: mark the source in flight and store its
    // opcode/size, each shifted left with a low marker bit appended.
    when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
      a_set := UIntToOH(bundle.a.bits.source)
      a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
      a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
      a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
      a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
      monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
    }
    val d_clr = WireInit(0.U(edge.client.endSourceId.W))
    val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
    d_clr.suggestName("d_clr")
    d_clr_wo_ready.suggestName("d_clr_wo_ready")
    val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
    d_opcodes_clr.suggestName("d_opcodes_clr")
    val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
    d_sizes_clr.suggestName("d_sizes_clr")
    // ReleaseAck answers a C-channel Release, so it must not clear A->D state.
    val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
    when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
      d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
    }
    // On an accepted first D beat: clear the in-flight bit and the whole
    // opcode/size slot for that source ID.
    when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
      d_clr := UIntToOH(bundle.d.bits.source)
      d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
      d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
    }
    // Check the response against the stored request; a response may also
    // legally pair with an A request being accepted in this same cycle.
    when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
      val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
      assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
      when (same_cycle_resp) {
        assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
          (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
        assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
      } .otherwise {
        assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
          (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
        assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
      }
    }
    when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
      assume((!bundle.d.ready) || bundle.a.ready, "ready check")
    }
    if (edge.manager.minLatency > 0) {
      assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
    }
    inflight := (inflight | a_set) & ~d_clr
    inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
    inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
    // Watchdog: reset on any A/D beat; fires if traffic stalls past `limit`.
    val watchdog = RegInit(0.U(32.W))
    val limit = PlusArg("tilelink_timeout",
      docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
    monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
    watchdog := watchdog + 1.U
    when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
  }
  /** C->D source-ID tracker, the Release/ReleaseAck counterpart of
    * legalizeADSource: an accepted C request marks its source in flight
    * (storing its size, shifted with a low "set" bit so 0 means unset),
    * and the matching D ReleaseAck clears it and is size-checked.
    */
  def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
    val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
    val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
    val log_c_size_bus_size = log2Ceil(c_size_bus_size)
    def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
    // Per-source-ID state: one in-flight bit, plus packed opcode/size slots.
    val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
    val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
    val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
    inflight.suggestName("inflight")
    inflight_opcodes.suggestName("inflight_opcodes")
    inflight_sizes.suggestName("inflight_sizes")
    val c_first = edge.first(bundle.c.bits, bundle.c.fire)
    val d_first = edge.first(bundle.d.bits, bundle.d.fire)
    c_first.suggestName("c_first")
    d_first.suggestName("d_first")
    val c_set = WireInit(0.U(edge.client.endSourceId.W))
    val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
    val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
    val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
    c_set.suggestName("c_set")
    c_set_wo_ready.suggestName("c_set_wo_ready")
    c_opcodes_set.suggestName("c_opcodes_set")
    c_sizes_set.suggestName("c_sizes_set")
    // Recover the stored opcode/size for the responding source ID
    // (the final >> 1 drops the low "set" marker bit).
    val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
    val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
    c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
    c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
    c_opcode_lookup.suggestName("c_opcode_lookup")
    c_size_lookup.suggestName("c_size_lookup")
    val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
    val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
    c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
    c_sizes_set_interm.suggestName("c_sizes_set_interm")
    when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
      c_set_wo_ready := UIntToOH(bundle.c.bits.source)
    }
    // On an accepted first C beat: mark the source in flight and store its
    // opcode/size, each shifted left with a low marker bit appended.
    when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
      c_set := UIntToOH(bundle.c.bits.source)
      c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
      c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
      c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
      c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
      monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
    }
    // ProbeAck/ProbeAckData get no D response, so they are excluded from
    // the same-cycle ready check below.
    val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
    val d_clr = WireInit(0.U(edge.client.endSourceId.W))
    val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
    val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
    val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
    d_clr.suggestName("d_clr")
    d_clr_wo_ready.suggestName("d_clr_wo_ready")
    d_opcodes_clr.suggestName("d_opcodes_clr")
    d_sizes_clr.suggestName("d_sizes_clr")
    // Only ReleaseAck responses belong to the C->D exchange tracked here.
    val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
    when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
      d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
    }
    // On an accepted ReleaseAck: clear the in-flight bit and the whole
    // opcode/size slot for that source ID.
    when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
      d_clr := UIntToOH(bundle.d.bits.source)
      d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
      d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
    }
    // Check the ReleaseAck against the stored request; a response may also
    // legally pair with a C request being accepted in this same cycle.
    when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
      val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
      assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
      when (same_cycle_resp) {
        assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
      } .otherwise {
        assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
      }
    }
    when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
      assume((!bundle.d.ready) || bundle.c.ready, "ready check")
    }
    if (edge.manager.minLatency > 0) {
      when (c_set_wo_ready.orR) {
        assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
      }
    }
    inflight := (inflight | c_set) & ~d_clr
    inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
    inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
    // Watchdog: reset on any C/D beat; fires if traffic stalls past `limit`.
    val watchdog = RegInit(0.U(32.W))
    val limit = PlusArg("tilelink_timeout",
      docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
    monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
    watchdog := watchdog + 1.U
    when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
  }
  /** Tracks D->E sink IDs: a D request sets the sink's in-flight bit and
    * the matching E response clears it. E is treated as single-beat
    * (e_first is constant true).
    */
  def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
    // One bit per sink ID: set while a D request awaits its E response.
    val inflight = RegInit(0.U(edge.manager.endSinkId.W))
    val d_first = edge.first(bundle.d.bits, bundle.d.fire)
    val e_first = true.B
    val d_set = WireInit(0.U(edge.manager.endSinkId.W))
    when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
      d_set := UIntToOH(bundle.d.bits.sink)
      assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
    }
    val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
    when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
      e_clr := UIntToOH(bundle.e.bits.sink)
      monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
    }
    // edge.client.minLatency applies to BC, not DE
    inflight := (inflight | d_set) & ~e_clr
  }
  /** Enables the transaction-flight trackers (A->D, C->D, D->E) when their
    * state cost is acceptable. Tracking is skipped with a warning if the
    * source/sink ID space exceeds `tooBig` bits (>16kB of flight state).
    */
  def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
    val sourceBits = log2Ceil(edge.client.endSourceId)
    val tooBig = 14 // >16kB worth of flight information gets to be too much
    if (sourceBits > tooBig) {
      println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
    } else {
      // Simulation uses the strict or legacy tracker per TLMonitorStrictMode;
      // formal flows use the bounded-state formal variant instead.
      if (args.edge.params(TestplanTestType).simulation) {
        if (args.edge.params(TLMonitorStrictMode)) {
          legalizeADSource(bundle, edge)
          legalizeCDSource(bundle, edge)
        } else {
          legalizeADSourceOld(bundle, edge)
        }
      }
      if (args.edge.params(TestplanTestType).formal) {
        legalizeADSourceFormal(bundle, edge)
      }
    }
    if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
      // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
      val sinkBits = log2Ceil(edge.manager.endSinkId)
      if (sinkBits > tooBig) {
        println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
      } else {
        legalizeDESink(bundle, edge)
      }
    }
  }
  /** Top-level legality check: per-beat format rules, multibeat field
    * stability, and source/sink ID uniqueness across transactions.
    */
  def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
    legalizeFormat (bundle, edge)
    legalizeMultibeat (bundle, edge)
    legalizeUnique (bundle, edge)
  }
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
/** Mixin that adds explicit clock and reset fields to a Bundle. */
trait Clocked extends Bundle {
  val clock = Clock()
  val reset = Bool()
}
/** Convenience constructor for [[DecoupledHelper]]. */
object DecoupledHelper {
  def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
/** Bundles a set of ready/valid predicates so each endpoint can compute
  * "all the others are asserted" without including its own signal.
  */
class DecoupledHelper(val rvs: Seq[Bool]) {
  /** ANDs every signal except `exclude`, plus any extra `includes`.
    * Exclusion uses reference equality (`ne`), so `exclude` must be the
    * very same Bool object that was passed in at construction.
    */
  def fire(exclude: Bool, includes: Bool*) = {
    require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
    (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
  }
  /** ANDs all registered signals. */
  def fire() = {
    rvs.reduce(_ && _)
  }
}
/** Mux over tuples of signals: selects each component of `con` or `alt`
  * under the same condition.
  */
object MuxT {
  def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
    (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
  def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
    (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
  def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
    (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
  /** Builds a mux cascade over tuple pairs; earlier mapping entries win. */
  def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) =
    mapping.foldRight(default) { case ((k, v), rest) => MuxT(k === key, v, rest) }
  /** Builds a mux cascade over tuple triples; earlier mapping entries win. */
  def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) =
    mapping.foldRight(default) { case ((k, v), rest) => MuxT(k === key, v, rest) }
}
/** Merges several ValidIO signals into one: valid when any input is
  * valid, bits taken from the first valid input in argument order.
  */
object ValidMux {
  def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
    apply(v1 +: v2.toSeq)
  }
  def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
    val out = Wire(Valid(valids.head.bits.cloneType))
    out.valid := valids.map(_.valid).reduce(_ || _)
    // MuxCase scans the cases in order, so earlier inputs take priority;
    // valids.head.bits is the fallback when nothing is valid.
    out.bits := MuxCase(valids.head.bits,
      valids.map(v => (v.valid -> v.bits)))
    out
  }
}
/** Converts characters, strings, and hardware integers to packed-ASCII
  * UInt values suitable for printf-style display.
  */
object Str
{
  def apply(s: String): UInt = {
    var i = BigInt(0)
    require(s.forall(validChar _))
    // Pack big-endian, one byte per character.
    for (c <- s)
      i = (i << 8) | c
    i.U((s.length*8).W)
  }
  def apply(x: Char): UInt = {
    require(validChar(x))
    x.U(8.W)
  }
  def apply(x: UInt): UInt = apply(x, 10)
  /** Formats an unsigned value as ASCII digits in the given radix.
    * In decimal, leading zeros render as spaces.
    */
  def apply(x: UInt, radix: Int): UInt = {
    val rad = radix.U
    val w = x.getWidth
    require(w > 0)
    // Peel off digits least-significant first; the loop bound is the
    // maximum digit count of a w-bit value in this radix.
    var q = x
    var s = digit(q % rad)
    for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
      q = q / rad
      s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
    }
    s
  }
  def apply(x: SInt): UInt = apply(x, 10)
  /** Formats a signed value; in decimal the '-' sign floats to just left
    * of the most significant printed digit.
    */
  def apply(x: SInt, radix: Int): UInt = {
    val neg = x < 0.S
    val abs = x.abs.asUInt
    if (radix != 10) {
      Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
    } else {
      val rad = radix.U
      val w = abs.getWidth
      require(w > 0)
      var q = abs
      var s = digit(q % rad)
      var needSign = neg
      for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
        q = q / rad
        // Once the remaining quotient is zero, emit the sign (or a space)
        // instead of a leading-zero digit.
        val placeSpace = q === 0.U
        val space = Mux(needSign, Str('-'), Str(' '))
        needSign = needSign && !placeSpace
        s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
      }
      Cat(Mux(needSign, Str('-'), Str(' ')), s)
    }
  }
  // Maps 0-9 to '0'-'9' and 10+ to 'a'..., truncated to one byte.
  private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
  private def validChar(x: Char) = x == (x & 0xFF)
}
/** Splits a UInt into adjacent fields at the given bit positions; the
  * resulting tuple is ordered most-significant field first.
  */
object Split
{
  def apply(x: UInt, n0: Int) = {
    val top = x.getWidth
    (x.extract(top - 1, n0), x.extract(n0 - 1, 0))
  }
  def apply(x: UInt, n1: Int, n0: Int) = {
    val top = x.getWidth
    (x.extract(top - 1, n1), x.extract(n1 - 1, n0), x.extract(n0 - 1, 0))
  }
  def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
    val top = x.getWidth
    (x.extract(top - 1, n2), x.extract(n2 - 1, n1), x.extract(n1 - 1, n0), x.extract(n0 - 1, 0))
  }
}
/** Generates a pseudo-random index (binary or one-hot) in [0, mod),
  * drawing entropy from a 16-bit LFSR when none is supplied.
  */
object Random
{
  def apply(mod: Int, random: UInt): UInt = {
    // Power-of-two moduli just slice low bits; otherwise compare an
    // 8x-oversampled value against per-slice thresholds and priority-encode.
    if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
    else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
  }
  def apply(mod: Int): UInt = apply(mod, randomizer)
  def oneHot(mod: Int, random: UInt): UInt = {
    if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
    else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
  }
  def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
  private def randomizer = LFSR(16)
  // One Bool per slice: true iff value lies below that slice's upper bound.
  private def partition(value: UInt, slices: Int) =
    Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
/** Majority vote over a collection of Bools: true when more than half
  * of the inputs are true.
  */
object Majority {
  def apply(in: Set[Bool]): Bool = {
    // A majority exists iff some subset of size floor(n/2)+1 is all-true;
    // OR together the AND of every such subset.
    val n = (in.size >> 1) + 1
    val clauses = in.subsets(n).map(_.reduce(_ && _))
    clauses.reduce(_ || _)
  }
  def apply(in: Seq[Bool]): Bool = apply(in.toSet)
  def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
/** Tests whether a UInt has at least n bits set, using the cheapest
  * circuit for small n and a full PopCount comparison otherwise.
  */
object PopCountAtLeast {
  /** Returns (at-least-one-set, at-least-two-set) for the bits of x,
    * built as a divide-and-conquer reduction tree.
    */
  private def two(x: UInt): (Bool, Bool) = x.getWidth match {
    case 1 => (x.asBool, false.B)
    case n =>
      val half = x.getWidth / 2
      val (leftOne, leftTwo) = two(x(half - 1, 0))
      val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
      (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
  }
  def apply(x: UInt, n: Int): Bool = n match {
    case 0 => true.B
    case 1 => x.orR
    case 2 => two(x)._2
    // Generalized from `case 3`: the original pattern match covered only
    // n <= 3 and threw a scala.MatchError at elaboration for larger n;
    // the PopCount comparison is correct for every n >= 3.
    case _ => PopCount(x) >= n.U
  }
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
  /** Builds the byte-lane mask selected by (addr_lo, lgSize) within a beat.
    * @param addr_lo   low-order address bits within the beat
    * @param lgSize    log2 of the access size in bytes
    * @param beatBytes beat width in bytes (power of two)
    * @param groupBy   interleaved OR-reduce the mask into groups this wide
    */
  def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
    require (groupBy >= 1 && beatBytes >= groupBy)
    require (isPow2(beatBytes) && isPow2(groupBy))
    val lgBytes = log2Ceil(beatBytes)
    val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
    // helper(i) yields one (mask, address-match) pair per lane at
    // granularity 2^(lgBytes-i), refining the beat by halves.
    def helper(i: Int): Seq[(Bool, Bool)] = {
      if (i == 0) {
        Seq((lgSize >= lgBytes.asUInt, true.B))
      } else {
        val sub = helper(i-1)
        val size = sizeOH(lgBytes - i)
        val bit = addr_lo(lgBytes - i)
        val nbit = !bit
        Seq.tabulate (1 << i) { j =>
          val (sub_acc, sub_eq) = sub(j/2)
          val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
          val acc = sub_acc || (size && eq)
          (acc, eq)
        }
      }
    }
    // One group covering the whole beat degenerates to a single 1 bit.
    if (groupBy == beatBytes) 1.U else
      Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
  }
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
/** Legacy container for a PlusArg's default value and help text. */
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
  *
  * @tparam A scala type of the PlusArg value
  * @param default optional default value
  * @param docstring text to include in the help
  * @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT),
  *                precomputed by a [[Doctypeable]] instance at registration time
  */
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
  /** Return the doctype string (e.g. "INT", "STRING") for some option */
  def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
  // Implicit definitions carry explicit result types: this avoids fragile
  // type inference in implicit search and is required by Scala 3.
  /** Converts an Int => "INT" */
  implicit val intToDoctype: Doctypeable[Int] = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
  /** Converts a BigInt => "INT" */
  implicit val bigIntToDoctype: Doctypeable[BigInt] = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
  /** Converts a String => "STRING" */
  implicit val stringToDoctype: Doctypeable[String] = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
/** Verilog black box that samples the `+FORMAT=<value>` simulation
  * plusarg (or DEFAULT when absent) and drives it on `io.out`.
  * Implementation lives in the plusarg_reader.v resource.
  */
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
  "FORMAT" -> StringParam(format),
  "DEFAULT" -> IntParam(default),
  "WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
  val io = IO(new Bundle {
    val out = Output(UInt(width.W))
  })
  addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
  val io = IO(new Bundle {
    val count = Input(UInt(width.W))
  })
  val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
  // A plusarg value of 0 disables the timeout check entirely.
  when (max > 0.U) {
    assert (io.count < max, s"Timeout exceeded: $docstring")
  }
}
import Doctypes._
object PlusArg
{
  /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
    * Do not use this as an initial register value. The value is set in an
    * initial block and thus accessing it from another initial is racey.
    * Add a docstring to document the arg, which can be dumped in an elaboration
    * pass.
    */
  def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
    // Record the arg for help-text generation, then read it through the
    // plusarg_reader black box using the "<name>=%d" format.
    PlusArgArtefacts.append(name, Some(default), docstring)
    Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
  }
  /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
    * to kill the simulation when count exceeds the specified integer argument.
    * Default 0 will never assert.
    */
  def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
    PlusArgArtefacts.append(name, Some(default), docstring)
    Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
  }
}
/** Elaboration-time registry of every PlusArg created, used to emit help
  * text and a plusarg name table for inclusion in emulator.cc.
  */
object PlusArgArtefacts {
  // Accumulates registrations in elaboration order is not guaranteed;
  // keyed by arg name, later registrations overwrite earlier ones.
  private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
  /* Add a new PlusArg */
  @deprecated(
    "Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
    "Rocket Chip 2020.05"
  )
  def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
  /** Add a new PlusArg
    *
    * @tparam A scala type of the PlusArg value
    * @param name name for the PlusArg
    * @param default optional default value
    * @param docstring text to include in the help
    */
  def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
    artefacts = artefacts ++
      Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
  /* From plus args, generate help text (the literal "\n\" sequences are
   * C-source line continuations inside the generated string macro) */
  private def serializeHelp_cHeader(tab: String = ""): String = artefacts
    .map{ case(arg, info) =>
      s"""|$tab+$arg=${info.doctype}\\n\\
          |$tab${" "*20}${info.docstring}\\n\\
          |""".stripMargin ++ info.default.map{ case default =>
        s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
    }.toSeq.mkString("\\n\\\n") ++ "\""
  /* From plus args, generate a char array of their names */
  private def serializeArray_cHeader(tab: String = ""): String = {
    val prettyTab = tab + " " * 44 // Length of 'static const ...'
    s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
      artefacts
        .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
        .mkString("")++
      s"${prettyTab}0};"
  }
  /* Generate C code to be included in emulator.cc that helps with
   * argument parsing based on available Verilog PlusArgs */
  def serialize_cHeader(): String =
    s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
        |${serializeHelp_cHeader(" "*7)}
        |${serializeArray_cHeader()}
        |""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
  /** Adds set-membership tests to UInt. */
  implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
    /** True when x equals any element of s (false for an empty seq). */
    def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
    def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
  }
  /** Adds width-tolerant dynamic indexing to Vec. */
  implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
    /** Like Vec.apply(idx), but tolerates indices of mismatched width */
    // OR with a zero of the right width pads narrow indices; extract
    // then truncates wide ones to log2Ceil(size) bits.
    def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
  }
  /** Adds hardware indexing, UInt packing, and rotation to Seq[Data]. */
  implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
    /** Dynamically indexes the sequence with a hardware index. */
    def apply(idx: UInt): T = {
      if (x.size <= 1) {
        x.head
      } else if (!isPow2(x.size)) {
        // For non-power-of-2 seqs, reflect elements to simplify decoder
        (x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
      } else {
        // Ignore MSBs of idx
        val truncIdx =
          if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
          else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
        // Priority mux chain: later elements override earlier on a match.
        x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
      }
    }
    def extract(idx: UInt): T = VecInit(x).extract(idx)
    /** Concatenates all elements, element 0 in the least significant bits. */
    def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
    def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
    /** Rotates left by a hardware amount via a log-depth barrel network. */
    def rotate(n: UInt): Seq[T] = {
      if (x.size <= 1) {
        x
      } else {
        require(isPow2(x.size))
        val amt = n.padTo(log2Ceil(x.size))
        // Stage i conditionally rotates by 2^i when bit i of amt is set.
        (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
      }
    }
    def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
    /** Rotates right by a hardware amount via a log-depth barrel network. */
    def rotateRight(n: UInt): Seq[T] = {
      if (x.size <= 1) {
        x
      } else {
        require(isPow2(x.size))
        val amt = n.padTo(log2Ceil(x.size))
        (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
      }
    }
  }
  // allow bitwise ops on Seq[Bool] just like UInt
  implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
    // Note the asymmetry: & truncates to the shorter operand (missing bits
    // behave as zero), while | and ^ zero-pad both sides to the longer one.
    def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
    def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
    def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
    def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
    def >> (n: Int): Seq[Bool] = x drop n
    def unary_~ : Seq[Bool] = x.map(!_)
    // Empty-seq reductions use the standard identities: and=1, or=0, xor=0.
    def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
    def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
    def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
    private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
  }
  /** Adds a register-backed hold primitive and leaf flattening to Data. */
  implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
    /** Passes x through while enable is high, otherwise holds the value
      * last captured while enable was high.
      */
    def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
    /** Flattens any aggregate into its constituent leaf Elements. */
    def getElements: Seq[Element] = x match {
      case e: Element => Seq(e)
      case a: Aggregate => a.getElements.flatMap(_.getElements)
    }
  }
  /** Any Data subtype that has a Bool member named valid (structural type). */
  type DataCanBeValid = Data { val valid: Bool }
  /** Adds a read that holds its result between enables to SyncReadMem. */
  implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
    // RegNext(enable) aligns the hold window with the memory's one-cycle
    // read latency.
    def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
  }
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** Converts camelCase to snake_case, dropping all spaces.
 *  Each uppercase letter becomes "_<lowercase>"; the first character is
 *  simply lowercased. Note: throws on the empty string (as x.tail does).
 */
def underscore: String = {
val seed = x.headOption.map(_.toLower + "") getOrElse ""
x.tail.foldLeft(seed) { (acc, c) =>
if (c.isUpper) acc + "_" + c.toLower
else if (c == ' ') acc
else acc + c
}
}
/** Lowercases the string and turns spaces and underscores into hyphens. */
def kebab: String = x.toLowerCase map { c =>
if (c == ' ' || c == '_') '-' else c
}
/** Appends "_named_<name>" when a name is given, "_with_no_name" otherwise. */
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
// Implicitly view a UInt literal as a BitPat, for use in decode tables.
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
// Implicitly unwrap a WideCounter to its current value.
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
// Sign-extend to exactly n bits; n must be >= the current width.
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
// Zero-extend to exactly n bits; n must be >= the current width.
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
// (n is interpreted as two's-complement; the sign bit selects direction).
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
// ((y | (x & 0.U)) forces y to x's width before inversion).
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
// Rotate right by a static amount; n == 0 is the identity.
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
// Rotate right by a dynamic amount: a log2(width)-deep barrel rotator,
// one mux stage per bit of the rotate amount.
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
// Rotate left by a static amount; n == 0 is the identity.
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
// Rotate left by a dynamic amount (same barrel structure as rotateRight).
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
// (the top bit of the widened difference indicates underflow).
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
// Split into width-bit slices, least-significant slice first.
// NOTE(review): the last slice indexes past getWidth when width does not
// divide the total evenly - presumably callers only use even multiples; verify.
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
// Half-open range test: base <= x < bounds.
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
// Concatenation that treats None as a zero-width value.
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
// Concatenation that treats None as zero-width: the other operand passes through.
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
// Option-Option concatenation; None ## y is y, x ## None is x (via the UInt overload).
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
// 1 when true, 0 when false.
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
// Some(z) when true, None when false; z is evaluated lazily (by-name).
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2: requires x to be a power of 2
// (use log2Ceil/log2Floor directly for non-powers).
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
// Convert a one-hot-minus-one ("thermometer") value to plain one-hot.
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
// Convert a one-hot-minus-one value to its binary index.
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
// Convert a binary value to one-hot-minus-one form: the x lowest bits set.
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
// Index of the least-significant set bit, or None when x <= 0.
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
// Smears every set bit towards the MSB; `cap` bounds how far bits propagate.
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
// Doubling-stride OR tree: log2(stop) levels instead of a linear chain.
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
// Smears every set bit towards the LSB; `cap` bounds how far bits propagate.
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
// Doubling-stride OR tree, mirror image of leftOR.
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
// Routes a signal through a trivial pass-through module so that cross-module
// optimizations (constant propagation, CSE) cannot see through it.
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
// LinkedHashMap preserves first-seen key order, so the output is
// deterministic (unlike Seq.groupBy, which returns an unordered Map).
val buckets = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
xs.foreach { elem =>
buckets.getOrElseUpdate(f(elem), mutable.ListBuffer.empty[A]) += elem
}
buckets.iterator.map { case (key, elems) => key -> elems.toList }.toList
}
/** Broadcasts a single global setting to n copies, or accepts exactly n
 *  per-instance settings; any other count is a configuration error.
 */
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] =
if (in.size == 1) List.fill(n)(in.head)
else if (in.size == n) in
else throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
// HeterogeneousBag moved to standalone diplomacy
// Source-compatibility forwarders; new code should use the standalone
// diplomacy library's HeterogeneousBag directly.
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
/** Opcode encodings for the five TileLink channels.
 *  The dot columns (A B C D E) mark which channels carry each message and the
 *  arrow notes the response each request elicits. Opcode values are reused
 *  across channels (e.g. 6 is AcquireBlock on A, Probe on B, Release on C).
 */
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
// Per-channel opcode-validity checks (each channel's opcodes are contiguous from 0).
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
// Response opcode expected on D (resp. C) for each A (resp. B) request opcode.
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
// Human-readable (opcode name, param names) tables, indexed by opcode.
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
 * (B)ranch: the agent holds (or is on an outwards path to) a read-only copy.
 * (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
/** Permission-transition encodings carried in the `param` field.
 *  Cap sets new permissions, Grow requests more, Shrink reports a downgrade,
 *  and Report states current permissions without change. Each group's
 *  encodings are contiguous from 0, enabling the `is*` range checks below.
 */
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
// Human-readable names, indexed by the corresponding param encoding.
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
// Fixed misspelling: index 3 is the TtoT report encoding (was "Report TotT").
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
/** Encodings of the atomic-operation `param` field for ArithmeticData and
 *  LogicalData messages. Each group is contiguous from 0, enabling the
 *  range checks below. */
object TLAtomics
{
val width = 3
// Arithmetic types (ArithmeticData param; MIN/MAX are signed, *U unsigned)
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types (LogicalData param)
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
// Human-readable names, indexed by param value.
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
/** Encodings of the `param` field for Hint messages (prefetch intents). */
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
// Human-readable names, indexed by param value.
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
// Common trait of the five channel bundles; channelName identifies the
// channel in diagnostic messages.
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
// Channels that carry a data payload (A, B, C, D).
sealed trait TLDataChannel extends TLChannel
// Data channels that also carry an address (A, B, C).
sealed trait TLAddrChannel extends TLDataChannel
/** Channel A: requests from client toward manager (Get/Put/Atomic/Hint/Acquire). */
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
/** Channel B: requests from manager toward client (e.g. Probe). No user/echo fields. */
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
/** Channel C: client responses/releases toward manager (ProbeAck, Release). No mask field. */
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
/** Channel D: manager responses toward client (AccessAck, Grant, ReleaseAck). */
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
/** Channel E: GrantAck from client to manager; carries only the sink id. */
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
/** The full TileLink port: channels A and D always, B/C/E only when the link
 *  supports coherence (params.hasBCE). Accessing b/c/e on a non-BCE link
 *  returns a zeroed dummy wire rather than failing. */
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
// Drive all output handshake signals inactive; direction of a.ready tells
// us which side of the link this bundle is on.
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
// Factory mirroring the class constructor.
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
// TileLink port carried across an asynchronous clock crossing; B and D flow
// in the opposite direction, hence Flipped.
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
// TileLink port carried across a rational (ratio-N) clock crossing.
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
// TileLink port using credit-based flow control instead of ready/valid.
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
// Ordering derives from position in `cases`: earlier entries compare as
// greater (more relaxed), so CACHED > GET_EFFECTS.
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
// Order by smallest start, then by largest range (smallest end) on ties.
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
// Hardware membership test, optimized by splitting the comparison at the
// highest bit where start and end-1 differ.
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
// Translate the whole range by x.
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
// Returns the first overlapping pair of ranges (in sorted order), or None
// when all ranges are disjoint or the input is empty.
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
// Single-size range.
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
// (0,0) is the empty range: no transfer size is supported.
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
// containsLg takes log2 of the size rather than the size itself.
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
// The empty range (no sizes supported).
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
// A TransferSizes in Boolean context means "supports at least one size".
implicit def asBool(x: TransferSizes) = !x.none
}
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
// Membership: x matches when it agrees with base on every bit the mask does not cover.
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
// Negative mask means the set extends to infinity.
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
// Remove the addresses of x from this set, expressed as a list of AddressSets.
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
// Expand a (possibly fragmented) set into contiguous [base, base+size) ranges.
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
// Matches every address (negative mask ignores all bits).
val everything = AddressSet(0, -1)
// Cover [base, base+size) with a minimal list of aligned power-of-2 sets.
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
// Merge sets that differ only in the given bit by widening their mask.
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
// Repeatedly unify over every bit present in any base, then sort.
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
// Enumerate every value formed by choosing a subset of the mask's set bits.
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
// Split the mask into its individual set bits, LSB first.
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
// Configuration of a decoupled buffer stage: queue depth plus the Queue
// flow/pipe combinational-bypass options.
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
// depth == 0 means no buffer: apply/irrevocable/sq pass the input through.
def isDefined = depth > 0
// Non-flow buffers add one cycle of latency.
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
// ShiftQueue-based variant of apply.
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
// A bare Int depth converts to a plain (non-flow, non-pipe) buffer.
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
/** A Boolean option that can also be left "unset", deferring to a prior value. */
case class TriStateValue(value: Boolean, set: Boolean)
{
/** Keeps orig when unset, otherwise overrides it with this value. */
def update(orig: Boolean) = if (!set) orig else value
}

object TriStateValue
{
/** Plain Booleans implicitly convert to set values. */
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
/** The unset marker: update() leaves the original value untouched. */
def unset = TriStateValue(false, false)
}
// Implemented by parameter types that can insert buffering on the inward
// direction, the outward direction, or both.
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
// One row of an id-remapping table, printable via pretty().
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
// NOTE: uses reference inequality (ne) to detect whether from/to are the
// same object, which selects between the 7-argument and 5-argument formats.
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
// A printable table of IdMapEntry rows sharing one format string.
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
// True when address is aligned to the 2^lgSize transfer size.
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
// UIntToOH1 yields a mask of the low lgSize offset bits.
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
// Byte-lane mask for a transfer of 2^lgSize bytes at the given address.
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
// Statically determines whether a channel's messages carry data, when the
// port parameters make it a constant; None means it depends on the opcode.
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
// (Probe appears on both sides: it can produce ProbeAck or ProbeAckData.)
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
// True when the message initiates a transaction that expects a reply.
// C/D distinguish by opcode bits; A/B are always requests, E never is.
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
//    opcode === TLMessages.Release ||
//    opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
//    opcode === TLMessages.Grant     ||
//    opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
// True when the message answers an earlier request. A Grant is both a
// response (to Acquire) and a request (expecting GrantAck), so D is always true.
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
//    opcode =/= TLMessages.Release &&
//    opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
// True when the message carries a data payload; decoded from the opcode,
// but replaced by a constant when staticHasData can prove it.
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
//    opcode === TLMessages.PutFullData    ||
//    opcode === TLMessages.PutPartialData ||
//    opcode === TLMessages.ArithmeticData ||
//    opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
//    opcode === TLMessages.PutFullData    ||
//    opcode === TLMessages.PutPartialData ||
//    opcode === TLMessages.ArithmeticData ||
//    opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
//    opcode === TLMessages.AccessAckData ||
//    opcode === TLMessages.ProbeAckData  ||
//    opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
//    opcode === TLMessages.AccessAckData ||
//    opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
// ---------------------------------------------------------------------------
// Channel-polymorphic field accessors: extract a common field from whichever
// concrete TL bundle (A/B/C/D) is passed. These let generic monitor/utility
// code operate over any data-carrying channel without per-channel duplication.
// ---------------------------------------------------------------------------
/** Message opcode of a data channel (A/B/C/D). */
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
/** Message parameter field (permissions / atomic op / hint code, per opcode). */
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
/** log2 of the transfer size in bytes. */
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
/** Data payload of the current beat. */
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
/** Corrupt flag for the current beat's data. */
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
/** Byte-lane mask. C has no mask field, so it is recomputed from address+size. */
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
/** Mask as implied purely by address+size (ignores any partial mask in the bundle). */
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
/** Target address of an address-carrying channel (A/B/C). */
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
/** Transaction source id. */
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
// Split an address at the beat boundary: addr_hi is the beat index
// (address >> log2(beatBytes)), addr_lo the byte offset within one beat
// (constant 0 when a beat is a single byte).
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
/** Number of beats a message on channel `x` occupies (one-hot-ish decode).
* E messages are always a single beat. For data channels: a message without
* payload is one beat; with payload, the beat count is 2^(size - log2(beatBytes)),
* computed as a one-hot of `size` shifted down, OR'd with `small` so that
* transfers that fit in one beat still yield 1.
*/
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
// If the edge can never carry more than one beat, `small` is statically true.
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
/** numBeats(x) - 1, computed directly (cheaper than subtracting): UIntToOH1
* produces a mask of (2^size - 1), shifted down to beat units. Zero for
* data-less messages, E messages, or edges whose max size fits in one beat.
*/
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
/** Track multi-beat message position on a channel. Elaborates a down-counter
* of remaining beats, decremented on each `fire`. Returns
* (first-beat?, last-beat?, message-done-this-cycle?, current beat index).
* NOTE: each call creates a NEW counter register — callers that need several
* of the derived signals should call this once (see firstlast/count below).
*/
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
// Beat index: counter counts DOWN from beats1, so invert relative to beats1.
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
// ---------------------------------------------------------------------------
// Convenience projections of firstlastHelper for Decoupled/Valid channels.
// CAUTION: each of these elaborates its own beat counter; do not call two of
// them on the same channel expecting shared state — use firstlast/count once.
// ---------------------------------------------------------------------------
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
// (first, last, done) from a single shared counter.
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
// As firstlast, plus the current beat index.
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
// As count, but the index is scaled to a byte address increment.
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
// All writes (Put*/Arithmetic/Logical) need T; Get does not; Hint needs T only
// for PREFETCH_WRITE; Acquires need T when growing to NtoT/BtoT.
// Unlisted opcode/param values decode to DontCare (caller must ensure legality).
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
// Counts transactions currently in flight on the bundle: increments when the
// first beat of a request fires, decrements when the last beat of a response
// fires. B/C/E only participate when the edge actually supports coherence
// (bce). Returns (registered count, combinational next count).
def inFlight(x: TLBundle): (UInt, UInt) = {
// Worst case: A, C and D transactions all outstanding per source id.
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
/** Render a human-readable description of this edge's TL source-id mapping,
* labelled with `context` for log output.
*/
def prettySourceMapping(context: String): String = {
val idMap = new TLSourceIdMap(client)
"TL-Source mapping for " + context + ":\n" + idMap.pretty + "\n"
}
}
/** Client-side (outward-facing) edge: factory methods that construct the
* TileLink messages a client may SEND — channel A requests, channel C
* responses/releases, and channel E acks. Transfer/access constructors
* return (legal, bits), where `legal` indicates the addressed manager
* supports that operation at that address and size; pure-response
* constructors return just the bundle. Every constructor fully drives
* all fields of the bundle it returns.
*/
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
// Acquire a cached copy of a block, transferring both permissions and data.
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
// Acquire only permissions on a block (no data transfer expected).
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
// Voluntarily release permissions on a cached block (no dirty data).
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
// Release permissions and write back dirty data.
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
// ReleaseData with corrupt defaulted to false.
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
// Answer a B-channel Probe (fields echoed from the probe) without data.
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
// Answer a Probe AND write back dirty data (ProbeAckData).
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
// Acknowledge a Grant/GrantData on channel E, closing the transaction.
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
// Uncached read request.
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
// Full write (all byte lanes implied by address+size).
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
// Partial write: caller supplies an explicit byte-lane mask (the `mask`
// parameter shadows the inherited mask(...) helper here).
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
// Atomic arithmetic operation (param selects MIN/MAX/ADD etc.).
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
// Atomic logical operation (param selects XOR/OR/AND/SWAP etc.).
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
// Hint (e.g. prefetch); param carries the hint code.
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
// C-channel responses to forwarded B-channel accesses.
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
// C-channel response to a forwarded B-channel Hint.
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
/** Manager-side (inward-facing) edge: factory methods that construct the
* TileLink messages a manager may SEND — channel B requests toward clients
* and channel D responses. B-channel constructors return (legal, bits),
* where `legal` indicates the target client supports the operation at that
* source id and size; D-channel constructors return just the bundle.
*/
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transpose that tolerates ragged inner sequences (stops at the shortest).
// NOTE(review): not referenced anywhere in this visible span.
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
// Probe a client's cached copy, capping its permissions.
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
// Grant permissions (no data) in response to an Acquire.
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Grant permissions WITH data (GrantData).
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
// Acknowledge a C-channel Release/ReleaseData (sink is unused: tied to 0).
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
// Forwarded read request toward a client on channel B.
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
// Forwarded full write toward a client.
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
// Forwarded partial write: explicit byte-lane mask (the `mask` parameter
// shadows the inherited mask(...) helper here).
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
// Forwarded atomic arithmetic op toward a client.
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
// Forwarded atomic logical op toward a client.
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
// Forwarded hint toward a client.
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
// D-channel responses to A-channel accesses (no data).
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// D-channel responses with data (AccessAckData).
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
// D-channel response to an A-channel Hint.
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_61( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [20:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [20:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_29 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_33 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_35 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_39 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_41 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_54 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_56 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_60 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_62 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_66 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_68 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_72 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_74 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_78 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_80 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_84 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_86 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_90 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_92 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_wo_ready_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_wo_ready_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_4_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_5_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54]
wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52]
wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79]
wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35]
wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35]
wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34]
wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34]
wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34]
wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_55 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_56 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_57 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_58 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_59 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_60 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_61 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_62 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_63 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_64 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_65 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_66 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_67 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_68 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_69 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_70 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_71 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_72 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_73 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_74 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_75 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_76 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_10 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_11 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_12 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_13 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_T = io_in_a_bits_source_0 == 7'h30; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h8; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h9; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 5'hA; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 5'hB; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] _source_ok_T_25 = io_in_a_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_31 = io_in_a_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_37 = io_in_a_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire _source_ok_T_26 = _source_ok_T_25 == 4'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_30 = _source_ok_T_28; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_32 = _source_ok_T_31 == 4'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_34 = _source_ok_T_32; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_36 = _source_ok_T_34; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_6 = _source_ok_T_36; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_38 = _source_ok_T_37 == 4'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_40 = _source_ok_T_38; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_42 = _source_ok_T_40; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_7 = _source_ok_T_42; // @[Parameters.scala:1138:31]
wire _source_ok_T_43 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_8 = _source_ok_T_43; // @[Parameters.scala:1138:31]
wire _source_ok_T_44 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_45 = _source_ok_T_44 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_46 = _source_ok_T_45 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_47 = _source_ok_T_46 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_48 = _source_ok_T_47 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_49 = _source_ok_T_48 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_50 = _source_ok_T_49 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_50 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [20:0] _is_aligned_T = {15'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 21'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_4 = _uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_5 = _uncommonBits_T_5[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_6 = _uncommonBits_T_6[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_11 = _uncommonBits_T_11[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_12 = _uncommonBits_T_12[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_13 = _uncommonBits_T_13[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_18 = _uncommonBits_T_18[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_19 = _uncommonBits_T_19[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_20 = _uncommonBits_T_20[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_25 = _uncommonBits_T_25[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_26 = _uncommonBits_T_26[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_27 = _uncommonBits_T_27[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_32 = _uncommonBits_T_32[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_33 = _uncommonBits_T_33[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_34 = _uncommonBits_T_34[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_39 = _uncommonBits_T_39[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_40 = _uncommonBits_T_40[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_41 = _uncommonBits_T_41[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_46 = _uncommonBits_T_46[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_47 = _uncommonBits_T_47[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_48 = _uncommonBits_T_48[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_49 = _uncommonBits_T_49[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_53 = _uncommonBits_T_53[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_54 = _uncommonBits_T_54[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_55 = _uncommonBits_T_55[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_56 = _uncommonBits_T_56[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_57 = _uncommonBits_T_57[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_58 = _uncommonBits_T_58[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_59 = _uncommonBits_T_59[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_60 = _uncommonBits_T_60[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_61 = _uncommonBits_T_61[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_62 = _uncommonBits_T_62[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_63 = _uncommonBits_T_63[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_64 = _uncommonBits_T_64[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_65 = _uncommonBits_T_65[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_66 = _uncommonBits_T_66[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_67 = _uncommonBits_T_67[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_68 = _uncommonBits_T_68[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_69 = _uncommonBits_T_69[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_70 = _uncommonBits_T_70[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_71 = _uncommonBits_T_71[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_72 = _uncommonBits_T_72[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_73 = _uncommonBits_T_73[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_74 = _uncommonBits_T_74[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_75 = _uncommonBits_T_75[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_76 = _uncommonBits_T_76[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_51 = io_in_d_bits_source_0 == 7'h30; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_51; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_52 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_58 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_64 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_70 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_53 = _source_ok_T_52 == 5'h8; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_55 = _source_ok_T_53; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_57 = _source_ok_T_55; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_57; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_59 = _source_ok_T_58 == 5'h9; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_61 = _source_ok_T_59; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_63 = _source_ok_T_61; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_63; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_65 = _source_ok_T_64 == 5'hA; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_67 = _source_ok_T_65; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_69 = _source_ok_T_67; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_69; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_71 = _source_ok_T_70 == 5'hB; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_73 = _source_ok_T_71; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_75 = _source_ok_T_73; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_75; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[2:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] _source_ok_T_76 = io_in_d_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_82 = io_in_d_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_88 = io_in_d_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire _source_ok_T_77 = _source_ok_T_76 == 4'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_79 = _source_ok_T_77; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_81 = _source_ok_T_79; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_5 = _source_ok_T_81; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_12 = _source_ok_uncommonBits_T_12[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_83 = _source_ok_T_82 == 4'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_85 = _source_ok_T_83; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_87 = _source_ok_T_85; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_6 = _source_ok_T_87; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_13 = _source_ok_uncommonBits_T_13[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_89 = _source_ok_T_88 == 4'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_91 = _source_ok_T_89; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_93 = _source_ok_T_91; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_7 = _source_ok_T_93; // @[Parameters.scala:1138:31]
wire _source_ok_T_94 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_8 = _source_ok_T_94; // @[Parameters.scala:1138:31]
wire _source_ok_T_95 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_96 = _source_ok_T_95 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_97 = _source_ok_T_96 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_98 = _source_ok_T_97 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_99 = _source_ok_T_98 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_100 = _source_ok_T_99 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_101 = _source_ok_T_100 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_101 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire _T_1259 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1259; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1259; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [20:0] address; // @[Monitor.scala:391:22]
wire _T_1327 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1327; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1327; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1327; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg [64:0] inflight; // @[Monitor.scala:614:27]
reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [259:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [64:0] a_set; // @[Monitor.scala:626:34]
wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [259:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [127:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1192 = _T_1259 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1192 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1192 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1192 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1192 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1192 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [64:0] d_clr; // @[Monitor.scala:664:34]
wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1238 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1238 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1207 = _T_1327 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1207 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1207 ? _d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1207 ? _d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [64:0] inflight_1; // @[Monitor.scala:726:35]
wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [64:0] d_clr_1; // @[Monitor.scala:774:34]
wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [259:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1303 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1303 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1285 = _T_1327 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1285 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1285 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1285 ? _d_sizes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113]
wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [259:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
/** Behavioral shift register with an explicit reset value.
  *
  * Wraps `in` in `n` `RegNext` stages, each initialized to `init`.
  * If `name` is given, stage `i`'s register is suggest-named `${name}_${i}`.
  * For n == 0 the input is returned unchanged (empty fold).
  */
object ShiftRegInit {
  def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
    (0 until n).foldRight(in) {
      case (i, next) => {
        // Each fold step wraps the chain-so-far in one more register stage.
        val r = RegNext(next, init)
        // suggestName only hints the emitted RTL name for this stage.
        name.foreach { na => r.suggestName(s"${na}_${i}") }
        r
      }
    }
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
/** Base class for pipeline/synchronizer register modules: a w-bit
  * data-in (`d`) / data-out (`q`) Module with no other ports.
  */
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(w.W))
    val q = Output(UInt(w.W))
  }
  )
}
/** Instantiates a concrete AbstractPipelineReg, drives it from `in`
  * (bit-cast to UInt), and returns its output cast back to `in`'s type.
  */
object AbstractPipelineReg {
  def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
    val chain = Module(gen)
    name.foreach{ chain.suggestName(_) }
    chain.io.d := in.asUInt
    // asTypeOf reinterprets the raw output bits as the caller's data type.
    chain.io.q.asTypeOf(in)
  }
}
/** w-wide, `depth`-deep shift register built from `depth` AsyncResetRegVec
  * stages, each asynchronously reset to `init` and always enabled.
  * Data enters at `chain.last` and is presented at `chain.head`.
  */
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
  require(depth > 0, "Depth must be greater than 0.")
  override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
  val chain = List.tabulate(depth) { i =>
    Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
  }
  chain.last.io.d := io.d
  chain.last.io.en := true.B
  // Wire each stage's input to the next (deeper) stage's output.
  (chain.init zip chain.tail).foreach { case (sink, source) =>
    sink.io.d := source.io.q
    sink.io.en := true.B
  }
  io.q := chain.head.io.q
}
/** Convenience constructors; `init` may be given as an Int bit pattern
  * or as a Data literal (converted via litValue).
  */
object AsyncResetShiftReg {
  def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
    AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
  def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
    apply(in, depth, 0, name)
  def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
    apply(in, depth, init.litValue.toInt, name)
  def apply [T <: Data](in: T, depth: Int, init: T): T =
    apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
/** Reset styles a synchronizer chain can be built with. */
object SynchronizerResetType extends Enumeration {
  val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
/** 1-bit-wide, `sync`-deep synchronizer chain.
  *
  * The module name encodes reset type, depth and init value so backend
  * flows can recognize it and swap in a metastability-hardened cell.
  */
private class SynchronizerPrimitiveShiftReg(
  sync: Int,
  init: Boolean,
  resetType: SynchronizerResetType.Value)
  extends AbstractPipelineReg(1) {
  val initInt = if (init) 1 else 0
  // NonSync registers have no reset, so no init value appears in the name.
  val initPostfix = resetType match {
    case SynchronizerResetType.NonSync => ""
    case _ => s"_i${initInt}"
  }
  override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
  // NonSync chains use uninitialized Regs; all other types reset to `init`.
  val chain = List.tabulate(sync) { i =>
    val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
    reg.suggestName(s"sync_$i")
  }
  // Data enters at the tail of the list and exits from the head.
  chain.last := io.d.asBool
  (chain.init zip chain.tail).foreach { case (sink, source) =>
    sink := source
  }
  io.q := chain.head.asUInt
}
/** Builds a primitive chain with the reset-type mixin matching `resetType`. */
private object SynchronizerPrimitiveShiftReg {
  def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
    // Async/Sync variants mix in Require*Reset so the module's reset port
    // gets the corresponding Chisel reset type; NonSync/Inferred do not.
    val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
      case SynchronizerResetType.NonSync =>
        () => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
      case SynchronizerResetType.Async =>
        () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
      case SynchronizerResetType.Sync =>
        () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
      case SynchronizerResetType.Inferred =>
        () => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
    }
    AbstractPipelineReg(gen(), in)
  }
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
/** w-bit CDC synchronizer: one async-reset 1-bit primitive chain per bit. */
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
  extends AbstractPipelineReg(w) {
  require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
  override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
  val output = Seq.tabulate(w) { i =>
    // Bit i of the `init` pattern seeds bit i's chain.
    val initBit = ((init >> i) & 1) > 0
    withReset(reset.asAsyncReset){
      SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
    }
  }
  // Reverse so bit 0 lands in the LSB of the concatenation.
  io.q := Cat(output.reverse)
}
/** Convenience constructors; `init` may be an Int bit pattern or a Data literal. */
object AsyncResetSynchronizerShiftReg {
  def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
    AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
  def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
    apply (in, sync, 0, name)
  def apply [T <: Data](in: T, sync: Int): T =
    apply (in, sync, 0, None)
  def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
    apply(in, sync, init.litValue.toInt, name)
  def apply [T <: Data](in: T, sync: Int, init: T): T =
    apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
/** w-bit synchronizer with synchronously reset primitive chains.
  * Deprecated in favor of ResetSynchronizerShiftReg (inferred reset type).
  */
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
  require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
  override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
  val output = Seq.tabulate(w) { i =>
    // Bit i of the `init` pattern seeds bit i's chain.
    val initBit = ((init >> i) & 1) > 0
    withReset(reset.asBool){
      SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
    }
  }
  io.q := Cat(output.reverse)
}
/** Convenience constructors. Note sync == 0 returns the input unchanged. */
object SyncResetSynchronizerShiftReg {
  def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
    if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
  def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
    apply (in, sync, 0, name)
  def apply [T <: Data](in: T, sync: Int): T =
    apply (in, sync, 0, None)
  def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
    apply(in, sync, init.litValue.toInt, name)
  def apply [T <: Data](in: T, sync: Int, init: T): T =
    apply (in, sync, init.litValue.toInt, None)
}
/** w-bit synchronizer whose primitives let chisel3 infer the reset type. */
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
  require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
  override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
  val output = Seq.tabulate(w) { i =>
    // Bit i of the `init` pattern seeds bit i's chain.
    val initBit = ((init >> i) & 1) > 0
    SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
  }
  io.q := Cat(output.reverse)
}
/** Convenience constructors; `init` may be an Int bit pattern or a Data literal. */
object ResetSynchronizerShiftReg {
  def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
    AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
  def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
    apply (in, sync, 0, name)
  def apply [T <: Data](in: T, sync: Int): T =
    apply (in, sync, 0, None)
  def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
    apply(in, sync, init.litValue.toInt, name)
  def apply [T <: Data](in: T, sync: Int, init: T): T =
    apply (in, sync, init.litValue.toInt, None)
}
/** w-bit synchronizer with no reset (uninitialized registers); default depth 3. */
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
  require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
  override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
  val output = Seq.tabulate(w) { i =>
    SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
  }
  io.q := Cat(output.reverse)
}
/** Convenience constructors. Note sync == 0 returns the input unchanged. */
object SynchronizerShiftReg {
  def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
    if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
  def apply [T <: Data](in: T, sync: Int): T =
    apply (in, sync, None)
  def apply [T <: Data](in: T): T =
    apply (in, 3, None)
}
/** Single-stage, enable-gated register for clock-domain crossings.
  * `doInit` selects a register that resets to 0 versus an uninitialized one.
  */
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
  override def desiredName = s"ClockCrossingReg_w${w}"
  val io = IO(new Bundle{
    val d = Input(UInt(w.W))
    val q = Output(UInt(w.W))
    val en = Input(Bool())
  })
  val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
  io.q := cdc_reg
}
/** Wraps `in` in a ClockCrossingReg and returns the output in `in`'s type. */
object ClockCrossingReg {
  def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
    val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
    name.foreach{ cdc_reg.suggestName(_) }
    cdc_reg.io.d := in.asUInt
    cdc_reg.io.en := en
    cdc_reg.io.q.asTypeOf(in)
  }
}
// Generated 4-bit-wide, 3-deep asynchronous-reset synchronizer (init 0):
// one 1-bit primitive synchronizer chain per input bit, with the four
// chain outputs concatenated into io_q (bit 0 in the LSB).
module AsyncResetSynchronizerShiftReg_w4_d3_i0_36( // @[SynchronizerReg.scala:80:7]
  input        clock, // @[SynchronizerReg.scala:80:7]
  input        reset, // @[SynchronizerReg.scala:80:7]
  input  [3:0] io_d, // @[ShiftReg.scala:36:14]
  output [3:0] io_q // @[ShiftReg.scala:36:14]
);

  wire [3:0] io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
  // Per-bit async reset fanout (all driven by the module reset).
  wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
  wire _output_T_2 = reset; // @[SynchronizerReg.scala:86:21]
  wire _output_T_4 = reset; // @[SynchronizerReg.scala:86:21]
  wire _output_T_6 = reset; // @[SynchronizerReg.scala:86:21]
  wire [3:0] _io_q_T; // @[SynchronizerReg.scala:90:14]
  wire [3:0] io_q_0; // @[SynchronizerReg.scala:80:7]
  // Bit slices of the input, one per synchronizer chain.
  wire _output_T_1 = io_d_0[0]; // @[SynchronizerReg.scala:80:7, :87:41]
  wire output_0; // @[ShiftReg.scala:48:24]
  wire _output_T_3 = io_d_0[1]; // @[SynchronizerReg.scala:80:7, :87:41]
  wire output_1; // @[ShiftReg.scala:48:24]
  wire _output_T_5 = io_d_0[2]; // @[SynchronizerReg.scala:80:7, :87:41]
  wire output_2; // @[ShiftReg.scala:48:24]
  wire _output_T_7 = io_d_0[3]; // @[SynchronizerReg.scala:80:7, :87:41]
  wire output_3; // @[ShiftReg.scala:48:24]
  // Reassemble the synchronized bits: {bit3, bit2, bit1, bit0}.
  wire [1:0] io_q_lo = {output_1, output_0}; // @[SynchronizerReg.scala:90:14]
  wire [1:0] io_q_hi = {output_3, output_2}; // @[SynchronizerReg.scala:90:14]
  assign _io_q_T = {io_q_hi, io_q_lo}; // @[SynchronizerReg.scala:90:14]
  assign io_q_0 = _io_q_T; // @[SynchronizerReg.scala:80:7, :90:14]
  AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_327 output_chain ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (_output_T), // @[SynchronizerReg.scala:86:21]
    .io_d  (_output_T_1), // @[SynchronizerReg.scala:87:41]
    .io_q  (output_0)
  ); // @[ShiftReg.scala:45:23]
  AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_328 output_chain_1 ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (_output_T_2), // @[SynchronizerReg.scala:86:21]
    .io_d  (_output_T_3), // @[SynchronizerReg.scala:87:41]
    .io_q  (output_1)
  ); // @[ShiftReg.scala:45:23]
  AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_329 output_chain_2 ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (_output_T_4), // @[SynchronizerReg.scala:86:21]
    .io_d  (_output_T_5), // @[SynchronizerReg.scala:87:41]
    .io_q  (output_2)
  ); // @[ShiftReg.scala:45:23]
  AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_330 output_chain_3 ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (_output_T_6), // @[SynchronizerReg.scala:86:21]
    .io_d  (_output_T_7), // @[SynchronizerReg.scala:87:41]
    .io_q  (output_3)
  ); // @[ShiftReg.scala:45:23]
  assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
/** Grab-bag of implicit extension classes and helper functions shared
  * across rocket-chip hardware generators.
  */
package object util {
  implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
    def unzip = (x.map(_._1), x.map(_._2))
  }

  implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
    def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR

    def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
  }

  implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {

    /** Like Vec.apply(idx), but tolerates indices of mismatched width */
    def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
  }

  implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
    def apply(idx: UInt): T = {
      if (x.size <= 1) {
        x.head
      } else if (!isPow2(x.size)) {
        // For non-power-of-2 seqs, reflect elements to simplify decoder
        (x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
      } else {
        // Ignore MSBs of idx
        val truncIdx =
          if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
          else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
        x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
      }
    }

    def extract(idx: UInt): T = VecInit(x).extract(idx)
    def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
    def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)

    // Barrel-rotate by a dynamic amount: one mux layer per bit of the amount.
    def rotate(n: UInt): Seq[T] = {
      if (x.size <= 1) {
        x
      } else {
        require(isPow2(x.size))
        val amt = n.padTo(log2Ceil(x.size))
        (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
      }
    }

    def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)

    def rotateRight(n: UInt): Seq[T] = {
      if (x.size <= 1) {
        x
      } else {
        require(isPow2(x.size))
        val amt = n.padTo(log2Ceil(x.size))
        (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
      }
    }
  }

  // allow bitwise ops on Seq[Bool] just like UInt
  implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
    def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
    def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
    def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
    def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
    def >> (n: Int): Seq[Bool] = x drop n
    def unary_~ : Seq[Bool] = x.map(!_)
    def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
    def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
    def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)

    // Zero-extend both operands to the longer length before zipping.
    private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
  }

  implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
    // Capture x while `enable` is high; otherwise hold the last captured value.
    def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))

    def getElements: Seq[Element] = x match {
      case e: Element => Seq(e)
      case a: Aggregate => a.getElements.flatMap(_.getElements)
    }
  }

  /** Any Data subtype that has a Bool member named valid. */
  type DataCanBeValid = Data { val valid: Bool }

  implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
    def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
  }

  implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to to underscores, also removing all spaces */
    def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
      case (acc, c) if c.isUpper => acc + "_" + c.toLower
      case (acc, c) if c == ' ' => acc
      case (acc, c) => acc + c
    }

    /** converts spaces or underscores to hyphens, also lowering case */
    def kebab: String = x.toLowerCase map {
      case ' ' => '-'
      case '_' => '-'
      case c => c
    }

    def named(name: Option[String]): String = {
      x + name.map("_named_" + _ ).getOrElse("_with_no_name")
    }

    def named(name: String): String = named(Some(name))
  }

  implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
  implicit def wcToUInt(c: WideCounter): UInt = c.value

  implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
    // Sign-extend to exactly n bits (requires x no wider than n).
    def sextTo(n: Int): UInt = {
      require(x.getWidth <= n)
      if (x.getWidth == n) x
      else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
    }

    // Zero-extend to exactly n bits (requires x no wider than n).
    def padTo(n: Int): UInt = {
      require(x.getWidth <= n)
      if (x.getWidth == n) x
      else Cat(0.U((n - x.getWidth).W), x)
    }

    // shifts left by n if n >= 0, or right by -n if n < 0
    def << (n: SInt): UInt = {
      val w = n.getWidth - 1
      require(w <= 30)
      val shifted = x << n(w-1, 0)
      Mux(n(w), shifted >> (1 << w), shifted)
    }

    // shifts right by n if n >= 0, or left by -n if n < 0
    def >> (n: SInt): UInt = {
      val w = n.getWidth - 1
      require(w <= 30)
      val shifted = x << (1 << w) >> n(w-1, 0)
      Mux(n(w), shifted, shifted >> (1 << w))
    }

    // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
    def extract(hi: Int, lo: Int): UInt = {
      require(hi >= lo-1)
      if (hi == lo-1) 0.U
      else x(hi, lo)
    }

    // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
    def extractOption(hi: Int, lo: Int): Option[UInt] = {
      require(hi >= lo-1)
      if (hi == lo-1) None
      else Some(x(hi, lo))
    }

    // like x & ~y, but first truncate or zero-extend y to x's width
    def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))

    def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)

    // Barrel-rotate right by a dynamic amount, one mux layer per amount bit.
    def rotateRight(n: UInt): UInt = {
      if (x.getWidth <= 1) {
        x
      } else {
        val amt = n.padTo(log2Ceil(x.getWidth))
        (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
      }
    }

    def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))

    // Barrel-rotate left by a dynamic amount, one mux layer per amount bit.
    def rotateLeft(n: UInt): UInt = {
      if (x.getWidth <= 1) {
        x
      } else {
        val amt = n.padTo(log2Ceil(x.getWidth))
        (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
      }
    }

    // compute (this + y) % n, given (this < n) and (y < n)
    def addWrap(y: UInt, n: Int): UInt = {
      val z = x +& y
      if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
    }

    // compute (this - y) % n, given (this < n) and (y < n)
    def subWrap(y: UInt, n: Int): UInt = {
      val z = x -& y
      if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
    }

    def grouped(width: Int): Seq[UInt] =
      (0 until x.getWidth by width).map(base => x(base + width - 1, base))

    def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds

    def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)

    // Like >=, but prevents x-prop for ('x >= 0)
    def >== (y: UInt): Bool = x >= y || y === 0.U
  }

  implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
    def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
    def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
  }

  implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
    def toInt: Int = if (x) 1 else 0

    // this one's snagged from scalaz
    def option[T](z: => T): Option[T] = if (x) Some(z) else None
  }

  implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
    // exact log2
    def log2: Int = {
      require(isPow2(x))
      log2Ceil(x)
    }
  }

  // Convert a one-hot-minus-one (thermometer) code to a one-hot code.
  def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
  def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
  // Thermometer code with the low `x` bits set, i.e. (1 << x) - 1.
  def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
  def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)

  def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None

  // Fill 1s from low bits to high bits
  def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
  def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
    val stop = min(width, cap)
    def helper(s: Int, x: UInt): UInt =
      if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
    helper(1, x)(width-1, 0)
  }

  // Fill 1s from high bits to low bits
  def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
  def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
    val stop = min(width, cap)
    def helper(s: Int, x: UInt): UInt =
      if (s >= stop) x else helper(s+s, x | (x >> s))
    helper(1, x)(width-1, 0)
  }

  // Wrap `in` in a pass-through Module so optimization cannot see across it.
  def OptimizationBarrier[T <: Data](in: T): T = {
    val barrier = Module(new Module {
      val io = IO(new Bundle {
        val x = Input(chiselTypeOf(in))
        val y = Output(chiselTypeOf(in))
      })
      io.y := io.x
      override def desiredName = s"OptimizationBarrier_${in.typeName}"
    })
    barrier.io.x := in
    barrier.io.y
  }

  /** Similar to Seq.groupBy except this returns a Seq instead of a Map
    * Useful for deterministic code generation
    */
  def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
    // LinkedHashMap preserves first-seen key order, keeping output deterministic.
    val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
    for (x <- xs) {
      val key = f(x)
      val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
      l += x
    }
    map.view.map({ case (k, vs) => k -> vs.toList }).toList
  }

  // Accept either a single global setting (replicated n times) or exactly n.
  def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
    case 1 => List.fill(n)(in.head)
    case x if x == n => in
    case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
  }

  // HeterogeneousBag moved to standalone diplomacy
  @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
  def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
  @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
  val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Breakpoint.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.{Cat}
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.tile.{CoreBundle, HasCoreParameters}
import freechips.rocketchip.util._
/** Trigger/breakpoint control register fields, declared MSB-first.
  * NOTE(review): presumably mirrors the RISC-V Debug tdata1/mcontrol
  * layout — confirm against the Debug specification.
  */
class BPControl(implicit p: Parameters) extends CoreBundle()(p) {
  val ttype = UInt(4.W)
  val dmode = Bool()
  val maskmax = UInt(6.W)
  // Pad out to xLen; width depends on whether the wider BPWatch action field is used.
  val reserved = UInt((xLen - (if (coreParams.useBPWatch) 26 else 24)).W)
  val action = UInt((if (coreParams.useBPWatch) 3 else 1).W)
  val chain = Bool()
  val zero = UInt(2.W)
  val tmatch = UInt(2.W)
  // Privilege-mode enables (machine/hypervisor/supervisor/user).
  val m = Bool()
  val h = Bool()
  val s = Bool()
  val u = Bool()
  // Access-type enables (execute/store/load).
  val x = Bool()
  val w = Bool()
  val r = Bool()
  def tType = 2
  def maskMax = 4
  // Enabled only outside debug mode and when the current privilege's bit is set.
  def enabled(mstatus: MStatus) = !mstatus.debug && Cat(m, h, s, u)(mstatus.prv)
}
/** Trigger "extra" fields: select bits and compare values for the
  * machine context (mvalue/mselect) and supervisor context (svalue/sselect).
  * Field widths and positions differ between the RV32 and RV64 layouts.
  */
class TExtra(implicit p: Parameters) extends CoreBundle()(p) {
  def mvalueBits: Int = if (xLen == 32) coreParams.mcontextWidth min 6 else coreParams.mcontextWidth min 13
  def svalueBits: Int = if (xLen == 32) coreParams.scontextWidth min 16 else coreParams.scontextWidth min 34
  def mselectPos: Int = if (xLen == 32) 25 else 50
  def mvaluePos : Int = mselectPos + 1
  def sselectPos: Int = 0
  def svaluePos : Int = 2
  val mvalue = UInt(mvalueBits.W)
  val mselect = Bool()
  // Padding between the supervisor-value field and mselect.
  val pad2 = UInt((mselectPos - svalueBits - 2).W)
  val svalue = UInt(svalueBits.W)
  val pad1 = UInt(1.W)
  val sselect = Bool()
}
/** A single breakpoint/trigger: control fields, compare address, and
  * context-match extras, plus the address-matching logic.
  */
class BP(implicit p: Parameters) extends CoreBundle()(p) {
  val control = new BPControl
  val address = UInt(vaddrBits.W)
  val textra = new TExtra
  // True when the selected machine/supervisor context values agree (or the
  // corresponding select bit is clear / context width is zero).
  def contextMatch(mcontext: UInt, scontext: UInt) =
    (if (coreParams.mcontextWidth > 0) (!textra.mselect || (mcontext(textra.mvalueBits-1,0) === textra.mvalue)) else true.B) &&
    (if (coreParams.scontextWidth > 0) (!textra.sselect || (scontext(textra.svalueBits-1,0) === textra.svalue)) else true.B)
  // NAPOT-style mask: low bit i is 1 while tmatch(0) and address bits 0..i-1 stay 1.
  def mask(dummy: Int = 0) =
    (0 until control.maskMax-1).scanLeft(control.tmatch(0))((m, i) => m && address(i)).asUInt
  // Power-of-2 region match: equality ignoring the masked low bits.
  def pow2AddressMatch(x: UInt) =
    (~x | mask()) === (~address | mask())
  // Range match: tmatch(0) flips between (x >= address) and (x < address).
  def rangeAddressMatch(x: UInt) =
    (x >= address) ^ control.tmatch(0)
  // tmatch(1) selects range matching over power-of-2 (masked-equality) matching.
  def addressMatch(x: UInt) =
    Mux(control.tmatch(1), rangeAddressMatch(x), pow2AddressMatch(x))
}
/** Per-trigger watch outputs: overall/read/write/instruction match valids
  * and the trigger's action code.
  */
class BPWatch (val n: Int) extends Bundle() {
  val valid = Vec(n, Bool())
  val rvalid = Vec(n, Bool())
  val wvalid = Vec(n, Bool())
  val ivalid = Vec(n, Bool())
  val action = UInt(3.W)
}
/** Combinational matcher over n breakpoint triggers.
  *
  * Compares the fetch PC and load/store effective address against each
  * enabled trigger and raises either a breakpoint exception (action === 0)
  * or a debug-mode entry request (action === 1) for the matching access
  * kind. Trigger chaining is supported: a chained trigger only fires if the
  * previous trigger in the chain also matched.
  */
class BreakpointUnit(n: Int)(implicit val p: Parameters) extends Module with HasCoreParameters {
  val io = IO(new Bundle {
    val status = Input(new MStatus())
    val bp = Input(Vec(n, new BP))
    val pc = Input(UInt(vaddrBits.W))
    val ea = Input(UInt(vaddrBits.W))
    val mcontext = Input(UInt(coreParams.mcontextWidth.W))
    val scontext = Input(UInt(coreParams.scontextWidth.W))
    val xcpt_if = Output(Bool())
    val xcpt_ld = Output(Bool())
    val xcpt_st = Output(Bool())
    val debug_if = Output(Bool())
    val debug_ld = Output(Bool())
    val debug_st = Output(Bool())
    val bpwatch = Output(Vec(n, new BPWatch(1)))
  })
  // Defaults; overridden below by the last matching trigger (Chisel
  // last-connect semantics — statement order here is behavioral).
  io.xcpt_if := false.B
  io.xcpt_ld := false.B
  io.xcpt_st := false.B
  io.debug_if := false.B
  io.debug_ld := false.B
  io.debug_st := false.B
  // The fold threads (ri, wi, xi): whether the read/write/execute match of
  // the previous trigger permits this one to fire (true unless a preceding
  // chained trigger failed to match).
  (io.bpwatch zip io.bp).foldLeft((true.B, true.B, true.B)) { case ((ri, wi, xi), (bpw, bp)) =>
    val en = bp.control.enabled(io.status)
    val cx = bp.contextMatch(io.mcontext, io.scontext)
    val r = en && bp.control.r && bp.addressMatch(io.ea) && cx
    val w = en && bp.control.w && bp.addressMatch(io.ea) && cx
    val x = en && bp.control.x && bp.addressMatch(io.pc) && cx
    // end: this trigger terminates its chain, so a match may take effect.
    val end = !bp.control.chain
    val action = bp.control.action
    bpw.action := action
    bpw.valid(0) := false.B
    bpw.rvalid(0) := false.B
    bpw.wvalid(0) := false.B
    bpw.ivalid(0) := false.B
    // Fire only at the end of a fully-matched chain; action selects
    // exception (0) vs debug entry (1).
    when (end && r && ri) { io.xcpt_ld := (action === 0.U); io.debug_ld := (action === 1.U); bpw.valid(0) := true.B; bpw.rvalid(0) := true.B }
    when (end && w && wi) { io.xcpt_st := (action === 0.U); io.debug_st := (action === 1.U); bpw.valid(0) := true.B; bpw.wvalid(0) := true.B }
    when (end && x && xi) { io.xcpt_if := (action === 0.U); io.debug_if := (action === 1.U); bpw.valid(0) := true.B; bpw.ivalid(0) := true.B }
    // Pass chain state to the next trigger: satisfied if this trigger ends
    // a chain, or if it matched (arming the next chained trigger).
    (end || r, end || w, end || x)
  }
}
// NOTE(review): machine-generated (firtool) Verilog — the dataset "response"
// paired with the Chisel prompt above. Purely combinational: there are no
// always blocks, so the clock/reset ports are unused. The leading '|' on the
// next line is dataset row-separator residue, preserved verbatim.
| module BreakpointUnit_3( // @[Breakpoint.scala:79:7]
  input clock, // @[Breakpoint.scala:79:7]
  input reset, // @[Breakpoint.scala:79:7]
  input io_status_debug, // @[Breakpoint.scala:80:14]
  input io_status_cease, // @[Breakpoint.scala:80:14]
  input io_status_wfi, // @[Breakpoint.scala:80:14]
  input [31:0] io_status_isa, // @[Breakpoint.scala:80:14]
  input [1:0] io_status_dprv, // @[Breakpoint.scala:80:14]
  input io_status_dv, // @[Breakpoint.scala:80:14]
  input [1:0] io_status_prv, // @[Breakpoint.scala:80:14]
  input io_status_v, // @[Breakpoint.scala:80:14]
  input io_status_sd, // @[Breakpoint.scala:80:14]
  input io_status_mpv, // @[Breakpoint.scala:80:14]
  input io_status_gva, // @[Breakpoint.scala:80:14]
  input io_status_tsr, // @[Breakpoint.scala:80:14]
  input io_status_tw, // @[Breakpoint.scala:80:14]
  input io_status_tvm, // @[Breakpoint.scala:80:14]
  input io_status_mxr, // @[Breakpoint.scala:80:14]
  input io_status_sum, // @[Breakpoint.scala:80:14]
  input io_status_mprv, // @[Breakpoint.scala:80:14]
  input [1:0] io_status_fs, // @[Breakpoint.scala:80:14]
  input [1:0] io_status_mpp, // @[Breakpoint.scala:80:14]
  input io_status_spp, // @[Breakpoint.scala:80:14]
  input io_status_mpie, // @[Breakpoint.scala:80:14]
  input io_status_spie, // @[Breakpoint.scala:80:14]
  input io_status_mie, // @[Breakpoint.scala:80:14]
  input io_status_sie, // @[Breakpoint.scala:80:14]
  input io_bp_0_control_dmode, // @[Breakpoint.scala:80:14]
  input io_bp_0_control_action, // @[Breakpoint.scala:80:14]
  input [1:0] io_bp_0_control_tmatch, // @[Breakpoint.scala:80:14]
  input io_bp_0_control_m, // @[Breakpoint.scala:80:14]
  input io_bp_0_control_s, // @[Breakpoint.scala:80:14]
  input io_bp_0_control_u, // @[Breakpoint.scala:80:14]
  input io_bp_0_control_x, // @[Breakpoint.scala:80:14]
  input io_bp_0_control_w, // @[Breakpoint.scala:80:14]
  input io_bp_0_control_r, // @[Breakpoint.scala:80:14]
  input [38:0] io_bp_0_address, // @[Breakpoint.scala:80:14]
  input [47:0] io_bp_0_textra_pad2, // @[Breakpoint.scala:80:14]
  input io_bp_0_textra_pad1, // @[Breakpoint.scala:80:14]
  input [38:0] io_pc, // @[Breakpoint.scala:80:14]
  input [38:0] io_ea, // @[Breakpoint.scala:80:14]
  output io_xcpt_if, // @[Breakpoint.scala:80:14]
  output io_xcpt_ld, // @[Breakpoint.scala:80:14]
  output io_xcpt_st, // @[Breakpoint.scala:80:14]
  output io_debug_if, // @[Breakpoint.scala:80:14]
  output io_debug_ld, // @[Breakpoint.scala:80:14]
  output io_debug_st, // @[Breakpoint.scala:80:14]
  output io_bpwatch_0_rvalid_0, // @[Breakpoint.scala:80:14]
  output io_bpwatch_0_wvalid_0, // @[Breakpoint.scala:80:14]
  output io_bpwatch_0_ivalid_0 // @[Breakpoint.scala:80:14]
);
  // Input echoes: firtool re-declares each input as an internal wire.
  wire io_status_debug_0 = io_status_debug; // @[Breakpoint.scala:79:7]
  wire io_status_cease_0 = io_status_cease; // @[Breakpoint.scala:79:7]
  wire io_status_wfi_0 = io_status_wfi; // @[Breakpoint.scala:79:7]
  wire [31:0] io_status_isa_0 = io_status_isa; // @[Breakpoint.scala:79:7]
  wire [1:0] io_status_dprv_0 = io_status_dprv; // @[Breakpoint.scala:79:7]
  wire io_status_dv_0 = io_status_dv; // @[Breakpoint.scala:79:7]
  wire [1:0] io_status_prv_0 = io_status_prv; // @[Breakpoint.scala:79:7]
  wire io_status_v_0 = io_status_v; // @[Breakpoint.scala:79:7]
  wire io_status_sd_0 = io_status_sd; // @[Breakpoint.scala:79:7]
  wire io_status_mpv_0 = io_status_mpv; // @[Breakpoint.scala:79:7]
  wire io_status_gva_0 = io_status_gva; // @[Breakpoint.scala:79:7]
  wire io_status_tsr_0 = io_status_tsr; // @[Breakpoint.scala:79:7]
  wire io_status_tw_0 = io_status_tw; // @[Breakpoint.scala:79:7]
  wire io_status_tvm_0 = io_status_tvm; // @[Breakpoint.scala:79:7]
  wire io_status_mxr_0 = io_status_mxr; // @[Breakpoint.scala:79:7]
  wire io_status_sum_0 = io_status_sum; // @[Breakpoint.scala:79:7]
  wire io_status_mprv_0 = io_status_mprv; // @[Breakpoint.scala:79:7]
  wire [1:0] io_status_fs_0 = io_status_fs; // @[Breakpoint.scala:79:7]
  wire [1:0] io_status_mpp_0 = io_status_mpp; // @[Breakpoint.scala:79:7]
  wire io_status_spp_0 = io_status_spp; // @[Breakpoint.scala:79:7]
  wire io_status_mpie_0 = io_status_mpie; // @[Breakpoint.scala:79:7]
  wire io_status_spie_0 = io_status_spie; // @[Breakpoint.scala:79:7]
  wire io_status_mie_0 = io_status_mie; // @[Breakpoint.scala:79:7]
  wire io_status_sie_0 = io_status_sie; // @[Breakpoint.scala:79:7]
  wire io_bp_0_control_dmode_0 = io_bp_0_control_dmode; // @[Breakpoint.scala:79:7]
  wire io_bp_0_control_action_0 = io_bp_0_control_action; // @[Breakpoint.scala:79:7]
  wire [1:0] io_bp_0_control_tmatch_0 = io_bp_0_control_tmatch; // @[Breakpoint.scala:79:7]
  wire io_bp_0_control_m_0 = io_bp_0_control_m; // @[Breakpoint.scala:79:7]
  wire io_bp_0_control_s_0 = io_bp_0_control_s; // @[Breakpoint.scala:79:7]
  wire io_bp_0_control_u_0 = io_bp_0_control_u; // @[Breakpoint.scala:79:7]
  wire io_bp_0_control_x_0 = io_bp_0_control_x; // @[Breakpoint.scala:79:7]
  wire io_bp_0_control_w_0 = io_bp_0_control_w; // @[Breakpoint.scala:79:7]
  wire io_bp_0_control_r_0 = io_bp_0_control_r; // @[Breakpoint.scala:79:7]
  wire [38:0] io_bp_0_address_0 = io_bp_0_address; // @[Breakpoint.scala:79:7]
  wire [47:0] io_bp_0_textra_pad2_0 = io_bp_0_textra_pad2; // @[Breakpoint.scala:79:7]
  wire io_bp_0_textra_pad1_0 = io_bp_0_textra_pad1; // @[Breakpoint.scala:79:7]
  wire [38:0] io_pc_0 = io_pc; // @[Breakpoint.scala:79:7]
  wire [38:0] io_ea_0 = io_ea; // @[Breakpoint.scala:79:7]
  // Constant-folded bundle fields (unconnected/tied-off in the Chisel source).
  wire [1:0] io_status_sxl = 2'h2; // @[Breakpoint.scala:79:7, :80:14]
  wire [1:0] io_status_uxl = 2'h2; // @[Breakpoint.scala:79:7, :80:14]
  wire cx = 1'h1; // @[Breakpoint.scala:55:126]
  wire end_0 = 1'h1; // @[Breakpoint.scala:109:15]
  wire [39:0] io_bp_0_control_reserved = 40'h0; // @[Breakpoint.scala:79:7, :80:14]
  wire [5:0] io_bp_0_control_maskmax = 6'h4; // @[Breakpoint.scala:79:7, :80:14]
  wire [3:0] io_bp_0_control_ttype = 4'h2; // @[Breakpoint.scala:79:7, :80:14]
  wire [1:0] io_status_xs = 2'h0; // @[Breakpoint.scala:79:7, :80:14]
  wire [1:0] io_status_vs = 2'h0; // @[Breakpoint.scala:79:7, :80:14]
  wire [1:0] io_bp_0_control_zero = 2'h0; // @[Breakpoint.scala:79:7, :80:14]
  wire [7:0] io_status_zero1 = 8'h0; // @[Breakpoint.scala:79:7, :80:14]
  wire io_status_mbe = 1'h0; // @[Breakpoint.scala:79:7]
  wire io_status_sbe = 1'h0; // @[Breakpoint.scala:79:7]
  wire io_status_sd_rv32 = 1'h0; // @[Breakpoint.scala:79:7]
  wire io_status_ube = 1'h0; // @[Breakpoint.scala:79:7]
  wire io_status_upie = 1'h0; // @[Breakpoint.scala:79:7]
  wire io_status_hie = 1'h0; // @[Breakpoint.scala:79:7]
  wire io_status_uie = 1'h0; // @[Breakpoint.scala:79:7]
  wire io_bp_0_control_chain = 1'h0; // @[Breakpoint.scala:79:7]
  wire io_bp_0_control_h = 1'h0; // @[Breakpoint.scala:79:7]
  wire io_bp_0_textra_mselect = 1'h0; // @[Breakpoint.scala:79:7]
  wire io_bp_0_textra_sselect = 1'h0; // @[Breakpoint.scala:79:7]
  wire [22:0] io_status_zero2 = 23'h0; // @[Breakpoint.scala:79:7, :80:14]
  wire _io_debug_ld_T = io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :118:84]
  wire _io_debug_st_T = io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :119:84]
  wire _io_debug_if_T = io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :120:84]
  wire r; // @[Breakpoint.scala:106:58]
  wire w; // @[Breakpoint.scala:107:58]
  wire x; // @[Breakpoint.scala:108:58]
  wire io_bpwatch_0_valid_0; // @[Breakpoint.scala:79:7]
  wire io_bpwatch_0_rvalid_0_0; // @[Breakpoint.scala:79:7]
  wire io_bpwatch_0_wvalid_0_0; // @[Breakpoint.scala:79:7]
  wire io_bpwatch_0_ivalid_0_0; // @[Breakpoint.scala:79:7]
  wire [2:0] io_bpwatch_0_action; // @[Breakpoint.scala:79:7]
  wire io_xcpt_if_0; // @[Breakpoint.scala:79:7]
  wire io_xcpt_ld_0; // @[Breakpoint.scala:79:7]
  wire io_xcpt_st_0; // @[Breakpoint.scala:79:7]
  wire io_debug_if_0; // @[Breakpoint.scala:79:7]
  wire io_debug_ld_0; // @[Breakpoint.scala:79:7]
  wire io_debug_st_0; // @[Breakpoint.scala:79:7]
  // Trigger enable: not in debug mode, and the m/s/u bit for the current
  // privilege level is set (BPControl.enabled in the Chisel source).
  wire _en_T = ~io_status_debug_0; // @[Breakpoint.scala:30:35, :79:7]
  wire [1:0] en_lo = {io_bp_0_control_s_0, io_bp_0_control_u_0}; // @[Breakpoint.scala:30:56, :79:7]
  wire [1:0] en_hi = {io_bp_0_control_m_0, 1'h0}; // @[Breakpoint.scala:30:56, :79:7]
  wire [3:0] _en_T_1 = {en_hi, en_lo}; // @[Breakpoint.scala:30:56]
  wire [3:0] _en_T_2 = _en_T_1 >> io_status_prv_0; // @[Breakpoint.scala:30:{56,68}, :79:7]
  wire _en_T_3 = _en_T_2[0]; // @[Breakpoint.scala:30:68]
  wire en = _en_T & _en_T_3; // @[Breakpoint.scala:30:{35,50,68}]
  wire _r_T = en & io_bp_0_control_r_0; // @[Breakpoint.scala:30:50, :79:7, :106:16]
  wire _r_T_1 = io_bp_0_control_tmatch_0[1]; // @[Breakpoint.scala:68:23, :79:7]
  wire _w_T_1 = io_bp_0_control_tmatch_0[1]; // @[Breakpoint.scala:68:23, :79:7]
  wire _x_T_1 = io_bp_0_control_tmatch_0[1]; // @[Breakpoint.scala:68:23, :79:7]
  wire _GEN = io_ea_0 >= io_bp_0_address_0; // @[Breakpoint.scala:65:8, :79:7]
  wire _r_T_2; // @[Breakpoint.scala:65:8]
  assign _r_T_2 = _GEN; // @[Breakpoint.scala:65:8]
  wire _w_T_2; // @[Breakpoint.scala:65:8]
  assign _w_T_2 = _GEN; // @[Breakpoint.scala:65:8]
  wire _r_T_3 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:65:36, :79:7]
  wire _r_T_6 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
  wire _r_T_16 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
  wire _w_T_3 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:65:36, :79:7]
  wire _w_T_6 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
  wire _w_T_16 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
  wire _x_T_3 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:65:36, :79:7]
  wire _x_T_6 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
  wire _x_T_16 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
  // r: read-address match (BP.addressMatch on io_ea, range vs pow2 by tmatch[1]).
  wire _r_T_4 = _r_T_2 ^ _r_T_3; // @[Breakpoint.scala:65:{8,20,36}]
  wire [38:0] _r_T_5 = ~io_ea_0; // @[Breakpoint.scala:62:6, :79:7]
  wire _r_T_7 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
  wire _r_T_17 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
  wire _w_T_7 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
  wire _w_T_17 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
  wire _x_T_7 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
  wire _x_T_17 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
  wire _r_T_8 = _r_T_6 & _r_T_7; // @[Breakpoint.scala:59:{56,73,83}]
  wire _r_T_9 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
  wire _r_T_19 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
  wire _w_T_9 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
  wire _w_T_19 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
  wire _x_T_9 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
  wire _x_T_19 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
  wire _r_T_10 = _r_T_8 & _r_T_9; // @[Breakpoint.scala:59:{73,83}]
  wire _r_T_11 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
  wire _r_T_21 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
  wire _w_T_11 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
  wire _w_T_21 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
  wire _x_T_11 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
  wire _x_T_21 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
  wire _r_T_12 = _r_T_10 & _r_T_11; // @[Breakpoint.scala:59:{73,83}]
  wire [1:0] r_lo = {_r_T_8, _r_T_6}; // @[package.scala:45:27]
  wire [1:0] r_hi = {_r_T_12, _r_T_10}; // @[package.scala:45:27]
  wire [3:0] _r_T_13 = {r_hi, r_lo}; // @[package.scala:45:27]
  wire [38:0] _r_T_14 = {_r_T_5[38:4], _r_T_5[3:0] | _r_T_13}; // @[package.scala:45:27]
  wire [38:0] _r_T_15 = ~io_bp_0_address_0; // @[Breakpoint.scala:62:24, :79:7]
  wire _r_T_18 = _r_T_16 & _r_T_17; // @[Breakpoint.scala:59:{56,73,83}]
  wire _r_T_20 = _r_T_18 & _r_T_19; // @[Breakpoint.scala:59:{73,83}]
  wire _r_T_22 = _r_T_20 & _r_T_21; // @[Breakpoint.scala:59:{73,83}]
  wire [1:0] r_lo_1 = {_r_T_18, _r_T_16}; // @[package.scala:45:27]
  wire [1:0] r_hi_1 = {_r_T_22, _r_T_20}; // @[package.scala:45:27]
  wire [3:0] _r_T_23 = {r_hi_1, r_lo_1}; // @[package.scala:45:27]
  wire [38:0] _r_T_24 = {_r_T_15[38:4], _r_T_15[3:0] | _r_T_23}; // @[package.scala:45:27]
  wire _r_T_25 = _r_T_14 == _r_T_24; // @[Breakpoint.scala:62:{9,19,33}]
  wire _r_T_26 = _r_T_1 ? _r_T_4 : _r_T_25; // @[Breakpoint.scala:62:19, :65:20, :68:{8,23}]
  wire _r_T_27 = _r_T & _r_T_26; // @[Breakpoint.scala:68:8, :106:{16,32}]
  assign r = _r_T_27; // @[Breakpoint.scala:106:{32,58}]
  assign io_bpwatch_0_rvalid_0_0 = r; // @[Breakpoint.scala:79:7, :106:58]
  // w: write-address match (same structure as r, on io_ea).
  wire _w_T = en & io_bp_0_control_w_0; // @[Breakpoint.scala:30:50, :79:7, :107:16]
  wire _w_T_4 = _w_T_2 ^ _w_T_3; // @[Breakpoint.scala:65:{8,20,36}]
  wire [38:0] _w_T_5 = ~io_ea_0; // @[Breakpoint.scala:62:6, :79:7]
  wire _w_T_8 = _w_T_6 & _w_T_7; // @[Breakpoint.scala:59:{56,73,83}]
  wire _w_T_10 = _w_T_8 & _w_T_9; // @[Breakpoint.scala:59:{73,83}]
  wire _w_T_12 = _w_T_10 & _w_T_11; // @[Breakpoint.scala:59:{73,83}]
  wire [1:0] w_lo = {_w_T_8, _w_T_6}; // @[package.scala:45:27]
  wire [1:0] w_hi = {_w_T_12, _w_T_10}; // @[package.scala:45:27]
  wire [3:0] _w_T_13 = {w_hi, w_lo}; // @[package.scala:45:27]
  wire [38:0] _w_T_14 = {_w_T_5[38:4], _w_T_5[3:0] | _w_T_13}; // @[package.scala:45:27]
  wire [38:0] _w_T_15 = ~io_bp_0_address_0; // @[Breakpoint.scala:62:24, :79:7]
  wire _w_T_18 = _w_T_16 & _w_T_17; // @[Breakpoint.scala:59:{56,73,83}]
  wire _w_T_20 = _w_T_18 & _w_T_19; // @[Breakpoint.scala:59:{73,83}]
  wire _w_T_22 = _w_T_20 & _w_T_21; // @[Breakpoint.scala:59:{73,83}]
  wire [1:0] w_lo_1 = {_w_T_18, _w_T_16}; // @[package.scala:45:27]
  wire [1:0] w_hi_1 = {_w_T_22, _w_T_20}; // @[package.scala:45:27]
  wire [3:0] _w_T_23 = {w_hi_1, w_lo_1}; // @[package.scala:45:27]
  wire [38:0] _w_T_24 = {_w_T_15[38:4], _w_T_15[3:0] | _w_T_23}; // @[package.scala:45:27]
  wire _w_T_25 = _w_T_14 == _w_T_24; // @[Breakpoint.scala:62:{9,19,33}]
  wire _w_T_26 = _w_T_1 ? _w_T_4 : _w_T_25; // @[Breakpoint.scala:62:19, :65:20, :68:{8,23}]
  wire _w_T_27 = _w_T & _w_T_26; // @[Breakpoint.scala:68:8, :107:{16,32}]
  assign w = _w_T_27; // @[Breakpoint.scala:107:{32,58}]
  assign io_bpwatch_0_wvalid_0_0 = w; // @[Breakpoint.scala:79:7, :107:58]
  // x: execute-address match (same structure, on io_pc).
  wire _x_T = en & io_bp_0_control_x_0; // @[Breakpoint.scala:30:50, :79:7, :108:16]
  wire _x_T_2 = io_pc_0 >= io_bp_0_address_0; // @[Breakpoint.scala:65:8, :79:7]
  wire _x_T_4 = _x_T_2 ^ _x_T_3; // @[Breakpoint.scala:65:{8,20,36}]
  wire [38:0] _x_T_5 = ~io_pc_0; // @[Breakpoint.scala:62:6, :79:7]
  wire _x_T_8 = _x_T_6 & _x_T_7; // @[Breakpoint.scala:59:{56,73,83}]
  wire _x_T_10 = _x_T_8 & _x_T_9; // @[Breakpoint.scala:59:{73,83}]
  wire _x_T_12 = _x_T_10 & _x_T_11; // @[Breakpoint.scala:59:{73,83}]
  wire [1:0] x_lo = {_x_T_8, _x_T_6}; // @[package.scala:45:27]
  wire [1:0] x_hi = {_x_T_12, _x_T_10}; // @[package.scala:45:27]
  wire [3:0] _x_T_13 = {x_hi, x_lo}; // @[package.scala:45:27]
  wire [38:0] _x_T_14 = {_x_T_5[38:4], _x_T_5[3:0] | _x_T_13}; // @[package.scala:45:27]
  wire [38:0] _x_T_15 = ~io_bp_0_address_0; // @[Breakpoint.scala:62:24, :79:7]
  wire _x_T_18 = _x_T_16 & _x_T_17; // @[Breakpoint.scala:59:{56,73,83}]
  wire _x_T_20 = _x_T_18 & _x_T_19; // @[Breakpoint.scala:59:{73,83}]
  wire _x_T_22 = _x_T_20 & _x_T_21; // @[Breakpoint.scala:59:{73,83}]
  wire [1:0] x_lo_1 = {_x_T_18, _x_T_16}; // @[package.scala:45:27]
  wire [1:0] x_hi_1 = {_x_T_22, _x_T_20}; // @[package.scala:45:27]
  wire [3:0] _x_T_23 = {x_hi_1, x_lo_1}; // @[package.scala:45:27]
  wire [38:0] _x_T_24 = {_x_T_15[38:4], _x_T_15[3:0] | _x_T_23}; // @[package.scala:45:27]
  wire _x_T_25 = _x_T_14 == _x_T_24; // @[Breakpoint.scala:62:{9,19,33}]
  wire _x_T_26 = _x_T_1 ? _x_T_4 : _x_T_25; // @[Breakpoint.scala:62:19, :65:20, :68:{8,23}]
  wire _x_T_27 = _x_T & _x_T_26; // @[Breakpoint.scala:68:8, :108:{16,32}]
  assign x = _x_T_27; // @[Breakpoint.scala:108:{32,58}]
  assign io_bpwatch_0_ivalid_0_0 = x; // @[Breakpoint.scala:79:7, :108:58]
  // Output steering: action bit selects exception (0) vs debug entry (1).
  assign io_bpwatch_0_action = {2'h0, io_bp_0_control_action_0}; // @[Breakpoint.scala:79:7, :80:14, :112:16]
  wire _io_xcpt_ld_T = ~io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :118:51]
  assign io_xcpt_ld_0 = r & _io_xcpt_ld_T; // @[Breakpoint.scala:79:7, :97:14, :106:58, :118:{27,40,51}]
  assign io_debug_ld_0 = r & _io_debug_ld_T; // @[Breakpoint.scala:79:7, :100:15, :106:58, :118:{27,73,84}]
  wire _io_xcpt_st_T = ~io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :118:51, :119:51]
  assign io_xcpt_st_0 = w & _io_xcpt_st_T; // @[Breakpoint.scala:79:7, :98:14, :107:58, :119:{27,40,51}]
  assign io_debug_st_0 = w & _io_debug_st_T; // @[Breakpoint.scala:79:7, :101:15, :107:58, :119:{27,73,84}]
  wire _io_xcpt_if_T = ~io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :118:51, :120:51]
  assign io_xcpt_if_0 = x & _io_xcpt_if_T; // @[Breakpoint.scala:79:7, :96:14, :108:58, :120:{27,40,51}]
  assign io_debug_if_0 = x & _io_debug_if_T; // @[Breakpoint.scala:79:7, :99:15, :108:58, :120:{27,73,84}]
  assign io_bpwatch_0_valid_0 = x | w | r; // @[Breakpoint.scala:79:7, :106:58, :107:58, :108:58, :118:27, :119:{27,107}, :120:{27,107}]
  assign io_xcpt_if = io_xcpt_if_0; // @[Breakpoint.scala:79:7]
  assign io_xcpt_ld = io_xcpt_ld_0; // @[Breakpoint.scala:79:7]
  assign io_xcpt_st = io_xcpt_st_0; // @[Breakpoint.scala:79:7]
  assign io_debug_if = io_debug_if_0; // @[Breakpoint.scala:79:7]
  assign io_debug_ld = io_debug_ld_0; // @[Breakpoint.scala:79:7]
  assign io_debug_st = io_debug_st_0; // @[Breakpoint.scala:79:7]
  assign io_bpwatch_0_rvalid_0 = io_bpwatch_0_rvalid_0_0; // @[Breakpoint.scala:79:7]
  assign io_bpwatch_0_wvalid_0 = io_bpwatch_0_wvalid_0_0; // @[Breakpoint.scala:79:7]
  assign io_bpwatch_0_ivalid_0 = io_bpwatch_0_ivalid_0_0; // @[Breakpoint.scala:79:7]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
/** Diplomatic adapter node for [[TLBuffer]].
  *
  * Parameters pass through unchanged except that each side's reported
  * minLatency grows by the latency of the buffers on the channels flowing
  * toward it (b+c toward clients, a+d toward managers). circuitIdentity is
  * true when all five channel buffers are zero-depth, letting diplomacy
  * treat the adapter as a wire.
  */
class TLBufferNode (
  a: BufferParams,
  b: BufferParams,
  c: BufferParams,
  d: BufferParams,
  e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
  clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
  managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
  override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
  override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
/** Inserts queues on each TileLink channel, with independently configurable
  * depth/flow/pipe behavior per channel (a..e). The B/C/E channels are only
  * buffered when the edge can actually carry them (Acquire/Probe support);
  * otherwise they are tied off.
  */
class TLBuffer(
  a: BufferParams,
  b: BufferParams,
  c: BufferParams,
  d: BufferParams,
  e: BufferParams)(implicit p: Parameters) extends LazyModule
{
  // Convenience constructors: one parameter per direction, or one for all.
  def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
  def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
  def this()(implicit p: Parameters) = this(BufferParams.default)
  val node = new TLBufferNode(a, b, c, d, e)
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) {
    def headBundle = node.out.head._2.bundle
    // Module name encodes the bundle shape of the first out edge, e.g. TLBuffer_a32d64s2k1z4u.
    override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
    (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
      // A and D are always present; buffer them unconditionally.
      out.a <> a(in .a)
      in .d <> d(out.d)
      if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
        in .b <> b(out.b)
        out.c <> c(in .c)
        out.e <> e(in .e)
      } else {
        // TL-UL/UH edge: B/C/E never fire, so tie them off on both sides.
        in.b.valid := false.B
        in.c.ready := true.B
        in.e.ready := true.B
        out.b.ready := true.B
        out.c.valid := false.B
        out.e.valid := false.B
      }
    }
  }
}
/** Factory helpers that instantiate a [[TLBuffer]] and return its node. */
object TLBuffer
{
  def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
  def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
  def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
  def apply(
    a: BufferParams,
    b: BufferParams,
    c: BufferParams,
    d: BufferParams,
    e: BufferParams)(implicit p: Parameters): TLNode =
  {
    val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
    buffer.node
  }

  // Build `depth` default buffers in series, optionally suggesting names
  // name_0 .. name_{depth-1} for the generated modules.
  def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
    val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
    name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
    buffers.map(_.node)
  }

  // As chain, but collapsed to a single node; depth 0 yields a pass-through
  // identity node.
  def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
    chain(depth, name)
      .reduceLeftOption(_ :*=* _)
      .getOrElse(TLNameNode("no_buffer"))
  }
}
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
// Config key selecting the protocol-monitor implementation attached to each
// monitored TileLink edge (defaults to the standard TLMonitor).
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))

/** Diplomacy NodeImp for synchronous TileLink: defines how edges, bundles,
  * graph rendering, monitors, and node-path bookkeeping are constructed.
  */
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
  def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
  def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
  def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
  def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
  // GraphML edge: black, labelled with the data width in bits.
  def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
  // Attach the configured protocol monitor as a passive observer of the edge.
  override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
    val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
    monitor.io.in := bundle
  }
  // Record this node on every client/manager's nodePath as parameters flow through.
  override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
    pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
  override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
    pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
// Standard synchronous TileLink node flavors, all thin wrappers over the
// generic diplomacy node types specialized with TLImp.
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
// Pure source (master) / sink (slave) endpoints with fixed port parameters.
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
// 1:1 parameter transformer (default: identity in both directions).
case class TLAdapterNode(
  clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
  managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
  implicit valName: ValName)
  extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
// Fixed-ratio N:M parameter transformer.
case class TLJunctionNode(
  clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
  managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
  implicit valName: ValName)
  extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
// Identity node carrying an explicit name (for graph readability).
object TLNameNode {
  def apply(name: ValName) = TLIdentityNode()(name)
  def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
  def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
// Disappears from the final graph; useful as a temporary binding point.
object TLTempNode {
  def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
// Many-to-many node that funnels all inputs' parameters into single outputs.
case class TLNexusNode(
  clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
  managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
  implicit valName: ValName)
  extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
// Escape hatch for nodes with fully custom resolution logic.
abstract class TLCustomNode(implicit valName: ValName)
  extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
// Node machinery for the asynchronous-FIFO clock-domain crossing flavor of
// TileLink (TLAsyncBundle carries AsyncQueue handshakes instead of ready/valid).
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
  def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
  def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
  // Graph edge: red, labelled with the async queue depth.
  def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
  // nodePath bookkeeping on the wrapped base parameters.
  override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
    pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
  override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
    pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
  clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
  managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
  implicit valName: ValName)
  extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
  def apply(name: ValName) = TLAsyncIdentityNode()(name)
  def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
  def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
// TL -> TLAsync (source side of the crossing); sync overrides the number of
// synchronizer stages credited to downstream latency.
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
  extends MixedAdapterNode(TLImp, TLAsyncImp)(
    dFn = { p => TLAsyncClientPortParameters(p) },
    uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
// TLAsync -> TL (sink side of the crossing).
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
  extends MixedAdapterNode(TLAsyncImp, TLImp)(
    dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
    uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
// Node machinery for rationally-related clock crossings (integer-ratio clocks).
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
  def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
  def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
  def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
  // nodePath bookkeeping on the wrapped base parameters.
  override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
    pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
  override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
    pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
  clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
  managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
  implicit valName: ValName)
  extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
  def apply(name: ValName) = TLRationalIdentityNode()(name)
  def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
  def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
// TL -> TLRational (source side); latency across the crossing is reset to 1.
case class TLRationalSourceNode()(implicit valName: ValName)
  extends MixedAdapterNode(TLImp, TLRationalImp)(
    dFn = { p => TLRationalClientPortParameters(p) },
    uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
// TLRational -> TL (sink side); direction records fast-to-slow vs slow-to-fast.
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
  extends MixedAdapterNode(TLRationalImp, TLImp)(
    dFn = { p => p.base.v1copy(minLatency = 1) },
    uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
/** Factory for credited TileLink identity nodes carrying an explicit diagram name. */
object TLCreditedNameNode {
  /** Build an identity node whose [[ValName]] is given directly. */
  def apply(name: ValName): TLCreditedIdentityNode = TLCreditedIdentityNode()(name)
  /** Build an identity node from an optional name; absent names render as "with_no_name". */
  def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
  /** Build an identity node from a plain string name. */
  def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
/** Adapter node that converts a plain TileLink channel into a credited TileLink channel.
  *
  * @param delay the [[TLCreditedDelay]] advertised for this crossing.
  */
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
  extends MixedAdapterNode(TLImp, TLCreditedImp)(
    // Downward parameters: wrap the TL client port with the credited delay.
    dFn = { down => TLCreditedClientPortParameters(delay, down) },
    // Upward parameters: unwrap the credited base; minLatency is forced to 1 to
    // discard cycles from the other clock domain.
    uFn = { up => up.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters]
/** Adapter node that converts a credited TileLink channel back into a plain TileLink channel.
  *
  * @param delay the [[TLCreditedDelay]] advertised for this crossing.
  */
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
  extends MixedAdapterNode(TLCreditedImp, TLImp)(
    // Downward parameters: unwrap the credited base, forcing minLatency to 1.
    dFn = { down => down.base.v1copy(minLatency = 1) },
    // Upward parameters: wrap the TL manager port with the credited delay.
    uFn = { up => TLCreditedManagerPortParameters(delay, up) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
  *
  * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
  */
sealed trait LazyModuleImpLike extends RawModule {

  /** [[LazyModule]] that contains this instance. */
  val wrapper: LazyModule

  /** IOs that will be automatically "punched" for this instance. */
  val auto: AutoBundle

  /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
  protected[diplomacy] val dangles: Seq[Dangle]

  // [[wrapper.module]] had better not be accessed while LazyModules are still being built!
  require(
    LazyModule.scope.isEmpty,
    s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
  )

  /** Set module name. Defaults to the containing LazyModule's desiredName. */
  override def desiredName: String = wrapper.desiredName

  suggestName(wrapper.suggestedName)

  /** [[Parameters]] for chisel [[Module]]s. */
  implicit val p: Parameters = wrapper.p

  /** Instantiate this [[LazyModule]], returning the generated [[AutoBundle]] IO and the
    * still-unconnected [[Dangle]]s from this module and its submodules.
    */
  protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
    // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
    // 2. return [[Dangle]]s from each module.
    val childDangles = wrapper.children.reverse.flatMap { c =>
      implicit val sourceInfo: SourceInfo = c.info
      c.cloneProto.map { cp =>
        // If the child is a clone, then recursively set cloneProto of its children as well
        def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
          require(bases.size == clones.size)
          (bases.zip(clones)).map { case (l, r) =>
            require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
            l.cloneProto = Some(r)
            // Recurse so the whole subtree under the clone is marked.
            assignCloneProtos(l.children, r.children)
          }
        }
        assignCloneProtos(c.children, cp.children)
        // Clone the child module as a record, and get its [[AutoBundle]]
        val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
        val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
        // Get the empty [[Dangle]]'s of the cloned child
        val rawDangles = c.cloneDangles()
        require(rawDangles.size == clonedAuto.elements.size)
        // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
        val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
        dangles
      }.getOrElse {
        // For non-clones, instantiate the child module
        val mod = try {
          Module(c.module)
        } catch {
          case e: ChiselException => {
            // Re-throw with context so the failing LazyModule and source line are identifiable.
            println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
            throw e
          }
        }
        mod.dangles
      }
    }
    // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
    // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
    val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
    // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
    val allDangles = nodeDangles ++ childDangles
    // Group [[allDangles]] by their [[source]].
    val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
    // For each [[source]] set of [[Dangle]]s of size 2, ensure that these
    // can be connected as a source-sink pair (have opposite flipped value).
    // Make the connection and mark them as [[done]].
    val done = Set() ++ pairing.values.filter(_.size == 2).map {
      case Seq(a, b) =>
        require(a.flipped != b.flipped)
        // @todo <> in chisel3 makes directionless connection.
        if (a.flipped) {
          a.data <> b.data
        } else {
          b.data <> a.data
        }
        a.source
      case _ => None
    }
    // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
    val forward = allDangles.filter(d => !done(d.source))
    // Generate [[AutoBundle]] IO from [[forward]].
    val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
    // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
    val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
      if (d.flipped) {
        d.data <> io
      } else {
        io <> d.data
      }
      d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
    }
    // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
    wrapper.inModuleBody.reverse.foreach {
      _()
    }
    // Optionally mark this module for inlining in the emitted FIRRTL.
    if (wrapper.shouldBeInlined) {
      chisel3.experimental.annotate(new ChiselAnnotation {
        def toFirrtl = InlineAnnotation(toNamed)
      })
    }
    // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
    (auto, dangles)
  }
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
  *
  * Uses the implicit clock and reset of a plain chisel [[Module]].
  *
  * @param wrapper
  *   the [[LazyModule]] from which the `.module` call is being made.
  */
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
  /** Instantiate hardware of this `Module`. */
  val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
  *
  * Unlike [[LazyModuleImp]], this has no implicit clock/reset of its own; the `childClock` and
  * `childReset` wires below stand in for them.
  *
  * @param wrapper
  *   the [[LazyModule]] from which the `.module` call is being made.
  */
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
  // These wires are the default clock+reset for all LazyModule children.
  // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
  // [[LazyRawModuleImp]] children.
  // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.

  /** drive clock explicitly. */
  val childClock: Clock = Wire(Clock())

  /** drive reset explicitly. */
  val childReset: Reset = Wire(Reset())

  // the default is that these are disabled: a constant-low clock and a don't-care reset.
  childClock := false.B.asClock
  childReset := chisel3.DontCare

  /** Override to `true` to make `childClock`/`childReset` the implicit clock/reset while children elaborate. */
  def provideImplicitClockToLazyChildren: Boolean = false

  // Instantiate children, optionally under the child clock/reset context chosen above.
  val (auto, dangles) =
    if (provideImplicitClockToLazyChildren) {
      withClockAndReset(childClock, childReset) { instantiate() }
    } else {
      instantiate()
    }
}
| module TLBuffer_a32d64s6k3z3c( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset, // @[Buffer.scala:40:9]
output auto_in_3_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_3_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_3_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_3_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_3_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_3_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_3_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_3_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_3_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_3_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_3_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_3_b_valid, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_3_b_bits_param, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_3_b_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_in_3_b_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_in_3_c_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_3_c_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_3_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_3_c_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_3_c_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_3_c_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_3_c_bits_address, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_3_c_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_3_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_3_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_3_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_3_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_3_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_3_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_3_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_3_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_3_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_3_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_3_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_3_e_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_3_e_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_2_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_2_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_2_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_2_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_2_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_2_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_2_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_2_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_2_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_2_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_2_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_2_b_valid, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_2_b_bits_param, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_2_b_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_in_2_b_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_in_2_c_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_2_c_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_2_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_2_c_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_2_c_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_2_c_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_2_c_bits_address, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_2_c_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_2_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_2_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_2_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_2_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_2_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_2_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_2_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_2_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_2_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_2_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_2_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_2_e_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_2_e_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_1_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_1_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_1_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_1_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_1_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_1_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_1_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_1_b_valid, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_1_b_bits_param, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_1_b_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_in_1_b_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_in_1_c_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_1_c_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_c_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_c_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_1_c_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_1_c_bits_address, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_1_c_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_1_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_1_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_1_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_1_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_1_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_1_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_1_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_1_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_1_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_1_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_1_e_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_e_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_0_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_0_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_0_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_0_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_0_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_0_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_0_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_0_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_0_b_valid, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_0_b_bits_param, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_0_b_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_in_0_b_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_in_0_c_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_0_c_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_c_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_c_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_0_c_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_0_c_bits_address, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_0_c_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_0_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_0_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_0_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_0_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_0_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_0_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_0_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_0_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_0_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_0_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_0_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_0_e_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_e_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_3_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_3_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_3_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_3_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_3_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_out_3_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_3_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_3_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_3_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_3_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_3_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_3_b_valid, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_3_b_bits_param, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_out_3_b_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_out_3_b_bits_address, // @[LazyModuleImp.scala:107:25]
input auto_out_3_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_3_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_3_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_3_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_3_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_out_3_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_3_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_3_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_3_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_3_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_3_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_3_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_3_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_3_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_out_3_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_3_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_3_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_3_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_3_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_3_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_3_e_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_2_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_2_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_2_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_2_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_2_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_out_2_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_2_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_2_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_2_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_2_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_2_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_2_b_valid, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_2_b_bits_param, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_out_2_b_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_out_2_b_bits_address, // @[LazyModuleImp.scala:107:25]
input auto_out_2_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_2_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_2_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_2_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_2_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_out_2_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_2_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_2_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_2_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_2_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_2_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_2_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_2_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_2_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_out_2_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_2_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_2_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_2_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_2_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_2_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_2_e_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_1_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_1_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_out_1_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_1_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_1_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_1_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_1_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_1_b_valid, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_1_b_bits_param, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_out_1_b_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_out_1_b_bits_address, // @[LazyModuleImp.scala:107:25]
input auto_out_1_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_1_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_out_1_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_1_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_1_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_1_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_1_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_1_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_1_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_1_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_out_1_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_1_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_1_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_1_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_1_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_1_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_e_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_0_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_0_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_out_0_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_0_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_0_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_0_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_0_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_0_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_0_b_valid, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_0_b_bits_param, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_out_0_b_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_out_0_b_bits_address, // @[LazyModuleImp.scala:107:25]
input auto_out_0_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_0_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_out_0_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_0_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_0_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_0_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_0_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_0_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_0_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_0_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_0_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_out_0_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_0_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_0_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_0_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_0_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_0_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_e_bits_sink // @[LazyModuleImp.scala:107:25]
);
wire _nodeIn_d_q_3_io_deq_valid; // @[Decoupled.scala:362:21]
wire [2:0] _nodeIn_d_q_3_io_deq_bits_opcode; // @[Decoupled.scala:362:21]
wire [1:0] _nodeIn_d_q_3_io_deq_bits_param; // @[Decoupled.scala:362:21]
wire [2:0] _nodeIn_d_q_3_io_deq_bits_size; // @[Decoupled.scala:362:21]
wire [5:0] _nodeIn_d_q_3_io_deq_bits_source; // @[Decoupled.scala:362:21]
wire [2:0] _nodeIn_d_q_3_io_deq_bits_sink; // @[Decoupled.scala:362:21]
wire _nodeIn_d_q_3_io_deq_bits_denied; // @[Decoupled.scala:362:21]
wire _nodeIn_d_q_3_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
wire _nodeOut_a_q_3_io_enq_ready; // @[Decoupled.scala:362:21]
wire _nodeIn_d_q_2_io_deq_valid; // @[Decoupled.scala:362:21]
wire [2:0] _nodeIn_d_q_2_io_deq_bits_opcode; // @[Decoupled.scala:362:21]
wire [1:0] _nodeIn_d_q_2_io_deq_bits_param; // @[Decoupled.scala:362:21]
wire [2:0] _nodeIn_d_q_2_io_deq_bits_size; // @[Decoupled.scala:362:21]
wire [5:0] _nodeIn_d_q_2_io_deq_bits_source; // @[Decoupled.scala:362:21]
wire [2:0] _nodeIn_d_q_2_io_deq_bits_sink; // @[Decoupled.scala:362:21]
wire _nodeIn_d_q_2_io_deq_bits_denied; // @[Decoupled.scala:362:21]
wire _nodeIn_d_q_2_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
wire _nodeOut_a_q_2_io_enq_ready; // @[Decoupled.scala:362:21]
wire _nodeIn_d_q_1_io_deq_valid; // @[Decoupled.scala:362:21]
wire [2:0] _nodeIn_d_q_1_io_deq_bits_opcode; // @[Decoupled.scala:362:21]
wire [1:0] _nodeIn_d_q_1_io_deq_bits_param; // @[Decoupled.scala:362:21]
wire [2:0] _nodeIn_d_q_1_io_deq_bits_size; // @[Decoupled.scala:362:21]
wire [5:0] _nodeIn_d_q_1_io_deq_bits_source; // @[Decoupled.scala:362:21]
wire [2:0] _nodeIn_d_q_1_io_deq_bits_sink; // @[Decoupled.scala:362:21]
wire _nodeIn_d_q_1_io_deq_bits_denied; // @[Decoupled.scala:362:21]
wire _nodeIn_d_q_1_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
wire _nodeOut_a_q_1_io_enq_ready; // @[Decoupled.scala:362:21]
wire _nodeIn_d_q_io_deq_valid; // @[Decoupled.scala:362:21]
wire [2:0] _nodeIn_d_q_io_deq_bits_opcode; // @[Decoupled.scala:362:21]
wire [1:0] _nodeIn_d_q_io_deq_bits_param; // @[Decoupled.scala:362:21]
wire [2:0] _nodeIn_d_q_io_deq_bits_size; // @[Decoupled.scala:362:21]
wire [5:0] _nodeIn_d_q_io_deq_bits_source; // @[Decoupled.scala:362:21]
wire [2:0] _nodeIn_d_q_io_deq_bits_sink; // @[Decoupled.scala:362:21]
wire _nodeIn_d_q_io_deq_bits_denied; // @[Decoupled.scala:362:21]
wire _nodeIn_d_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
wire _nodeOut_a_q_io_enq_ready; // @[Decoupled.scala:362:21]
TLMonitor_50 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (_nodeOut_a_q_io_enq_ready), // @[Decoupled.scala:362:21]
.io_in_a_valid (auto_in_0_a_valid),
.io_in_a_bits_opcode (auto_in_0_a_bits_opcode),
.io_in_a_bits_param (auto_in_0_a_bits_param),
.io_in_a_bits_size (auto_in_0_a_bits_size),
.io_in_a_bits_source (auto_in_0_a_bits_source),
.io_in_a_bits_address (auto_in_0_a_bits_address),
.io_in_a_bits_mask (auto_in_0_a_bits_mask),
.io_in_a_bits_corrupt (auto_in_0_a_bits_corrupt),
.io_in_b_ready (auto_in_0_b_ready),
.io_in_b_valid (auto_out_0_b_valid),
.io_in_b_bits_param (auto_out_0_b_bits_param),
.io_in_b_bits_source (auto_out_0_b_bits_source),
.io_in_b_bits_address (auto_out_0_b_bits_address),
.io_in_c_ready (auto_out_0_c_ready),
.io_in_c_valid (auto_in_0_c_valid),
.io_in_c_bits_opcode (auto_in_0_c_bits_opcode),
.io_in_c_bits_param (auto_in_0_c_bits_param),
.io_in_c_bits_size (auto_in_0_c_bits_size),
.io_in_c_bits_source (auto_in_0_c_bits_source),
.io_in_c_bits_address (auto_in_0_c_bits_address),
.io_in_c_bits_corrupt (auto_in_0_c_bits_corrupt),
.io_in_d_ready (auto_in_0_d_ready),
.io_in_d_valid (_nodeIn_d_q_io_deq_valid), // @[Decoupled.scala:362:21]
.io_in_d_bits_opcode (_nodeIn_d_q_io_deq_bits_opcode), // @[Decoupled.scala:362:21]
.io_in_d_bits_param (_nodeIn_d_q_io_deq_bits_param), // @[Decoupled.scala:362:21]
.io_in_d_bits_size (_nodeIn_d_q_io_deq_bits_size), // @[Decoupled.scala:362:21]
.io_in_d_bits_source (_nodeIn_d_q_io_deq_bits_source), // @[Decoupled.scala:362:21]
.io_in_d_bits_sink (_nodeIn_d_q_io_deq_bits_sink), // @[Decoupled.scala:362:21]
.io_in_d_bits_denied (_nodeIn_d_q_io_deq_bits_denied), // @[Decoupled.scala:362:21]
.io_in_d_bits_corrupt (_nodeIn_d_q_io_deq_bits_corrupt), // @[Decoupled.scala:362:21]
.io_in_e_valid (auto_in_0_e_valid),
.io_in_e_bits_sink (auto_in_0_e_bits_sink)
); // @[Nodes.scala:27:25]
TLMonitor_51 monitor_1 ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (_nodeOut_a_q_1_io_enq_ready), // @[Decoupled.scala:362:21]
.io_in_a_valid (auto_in_1_a_valid),
.io_in_a_bits_opcode (auto_in_1_a_bits_opcode),
.io_in_a_bits_param (auto_in_1_a_bits_param),
.io_in_a_bits_size (auto_in_1_a_bits_size),
.io_in_a_bits_source (auto_in_1_a_bits_source),
.io_in_a_bits_address (auto_in_1_a_bits_address),
.io_in_a_bits_mask (auto_in_1_a_bits_mask),
.io_in_a_bits_corrupt (auto_in_1_a_bits_corrupt),
.io_in_b_ready (auto_in_1_b_ready),
.io_in_b_valid (auto_out_1_b_valid),
.io_in_b_bits_param (auto_out_1_b_bits_param),
.io_in_b_bits_source (auto_out_1_b_bits_source),
.io_in_b_bits_address (auto_out_1_b_bits_address),
.io_in_c_ready (auto_out_1_c_ready),
.io_in_c_valid (auto_in_1_c_valid),
.io_in_c_bits_opcode (auto_in_1_c_bits_opcode),
.io_in_c_bits_param (auto_in_1_c_bits_param),
.io_in_c_bits_size (auto_in_1_c_bits_size),
.io_in_c_bits_source (auto_in_1_c_bits_source),
.io_in_c_bits_address (auto_in_1_c_bits_address),
.io_in_c_bits_corrupt (auto_in_1_c_bits_corrupt),
.io_in_d_ready (auto_in_1_d_ready),
.io_in_d_valid (_nodeIn_d_q_1_io_deq_valid), // @[Decoupled.scala:362:21]
.io_in_d_bits_opcode (_nodeIn_d_q_1_io_deq_bits_opcode), // @[Decoupled.scala:362:21]
.io_in_d_bits_param (_nodeIn_d_q_1_io_deq_bits_param), // @[Decoupled.scala:362:21]
.io_in_d_bits_size (_nodeIn_d_q_1_io_deq_bits_size), // @[Decoupled.scala:362:21]
.io_in_d_bits_source (_nodeIn_d_q_1_io_deq_bits_source), // @[Decoupled.scala:362:21]
.io_in_d_bits_sink (_nodeIn_d_q_1_io_deq_bits_sink), // @[Decoupled.scala:362:21]
.io_in_d_bits_denied (_nodeIn_d_q_1_io_deq_bits_denied), // @[Decoupled.scala:362:21]
.io_in_d_bits_corrupt (_nodeIn_d_q_1_io_deq_bits_corrupt), // @[Decoupled.scala:362:21]
.io_in_e_valid (auto_in_1_e_valid),
.io_in_e_bits_sink (auto_in_1_e_bits_sink)
); // @[Nodes.scala:27:25]
TLMonitor_52 monitor_2 ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (_nodeOut_a_q_2_io_enq_ready), // @[Decoupled.scala:362:21]
.io_in_a_valid (auto_in_2_a_valid),
.io_in_a_bits_opcode (auto_in_2_a_bits_opcode),
.io_in_a_bits_param (auto_in_2_a_bits_param),
.io_in_a_bits_size (auto_in_2_a_bits_size),
.io_in_a_bits_source (auto_in_2_a_bits_source),
.io_in_a_bits_address (auto_in_2_a_bits_address),
.io_in_a_bits_mask (auto_in_2_a_bits_mask),
.io_in_a_bits_corrupt (auto_in_2_a_bits_corrupt),
.io_in_b_ready (auto_in_2_b_ready),
.io_in_b_valid (auto_out_2_b_valid),
.io_in_b_bits_param (auto_out_2_b_bits_param),
.io_in_b_bits_source (auto_out_2_b_bits_source),
.io_in_b_bits_address (auto_out_2_b_bits_address),
.io_in_c_ready (auto_out_2_c_ready),
.io_in_c_valid (auto_in_2_c_valid),
.io_in_c_bits_opcode (auto_in_2_c_bits_opcode),
.io_in_c_bits_param (auto_in_2_c_bits_param),
.io_in_c_bits_size (auto_in_2_c_bits_size),
.io_in_c_bits_source (auto_in_2_c_bits_source),
.io_in_c_bits_address (auto_in_2_c_bits_address),
.io_in_c_bits_corrupt (auto_in_2_c_bits_corrupt),
.io_in_d_ready (auto_in_2_d_ready),
.io_in_d_valid (_nodeIn_d_q_2_io_deq_valid), // @[Decoupled.scala:362:21]
.io_in_d_bits_opcode (_nodeIn_d_q_2_io_deq_bits_opcode), // @[Decoupled.scala:362:21]
.io_in_d_bits_param (_nodeIn_d_q_2_io_deq_bits_param), // @[Decoupled.scala:362:21]
.io_in_d_bits_size (_nodeIn_d_q_2_io_deq_bits_size), // @[Decoupled.scala:362:21]
.io_in_d_bits_source (_nodeIn_d_q_2_io_deq_bits_source), // @[Decoupled.scala:362:21]
.io_in_d_bits_sink (_nodeIn_d_q_2_io_deq_bits_sink), // @[Decoupled.scala:362:21]
.io_in_d_bits_denied (_nodeIn_d_q_2_io_deq_bits_denied), // @[Decoupled.scala:362:21]
.io_in_d_bits_corrupt (_nodeIn_d_q_2_io_deq_bits_corrupt), // @[Decoupled.scala:362:21]
.io_in_e_valid (auto_in_2_e_valid),
.io_in_e_bits_sink (auto_in_2_e_bits_sink)
); // @[Nodes.scala:27:25]
TLMonitor_53 monitor_3 ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (_nodeOut_a_q_3_io_enq_ready), // @[Decoupled.scala:362:21]
.io_in_a_valid (auto_in_3_a_valid),
.io_in_a_bits_opcode (auto_in_3_a_bits_opcode),
.io_in_a_bits_param (auto_in_3_a_bits_param),
.io_in_a_bits_size (auto_in_3_a_bits_size),
.io_in_a_bits_source (auto_in_3_a_bits_source),
.io_in_a_bits_address (auto_in_3_a_bits_address),
.io_in_a_bits_mask (auto_in_3_a_bits_mask),
.io_in_a_bits_corrupt (auto_in_3_a_bits_corrupt),
.io_in_b_ready (auto_in_3_b_ready),
.io_in_b_valid (auto_out_3_b_valid),
.io_in_b_bits_param (auto_out_3_b_bits_param),
.io_in_b_bits_source (auto_out_3_b_bits_source),
.io_in_b_bits_address (auto_out_3_b_bits_address),
.io_in_c_ready (auto_out_3_c_ready),
.io_in_c_valid (auto_in_3_c_valid),
.io_in_c_bits_opcode (auto_in_3_c_bits_opcode),
.io_in_c_bits_param (auto_in_3_c_bits_param),
.io_in_c_bits_size (auto_in_3_c_bits_size),
.io_in_c_bits_source (auto_in_3_c_bits_source),
.io_in_c_bits_address (auto_in_3_c_bits_address),
.io_in_c_bits_corrupt (auto_in_3_c_bits_corrupt),
.io_in_d_ready (auto_in_3_d_ready),
.io_in_d_valid (_nodeIn_d_q_3_io_deq_valid), // @[Decoupled.scala:362:21]
.io_in_d_bits_opcode (_nodeIn_d_q_3_io_deq_bits_opcode), // @[Decoupled.scala:362:21]
.io_in_d_bits_param (_nodeIn_d_q_3_io_deq_bits_param), // @[Decoupled.scala:362:21]
.io_in_d_bits_size (_nodeIn_d_q_3_io_deq_bits_size), // @[Decoupled.scala:362:21]
.io_in_d_bits_source (_nodeIn_d_q_3_io_deq_bits_source), // @[Decoupled.scala:362:21]
.io_in_d_bits_sink (_nodeIn_d_q_3_io_deq_bits_sink), // @[Decoupled.scala:362:21]
.io_in_d_bits_denied (_nodeIn_d_q_3_io_deq_bits_denied), // @[Decoupled.scala:362:21]
.io_in_d_bits_corrupt (_nodeIn_d_q_3_io_deq_bits_corrupt), // @[Decoupled.scala:362:21]
.io_in_e_valid (auto_in_3_e_valid),
.io_in_e_bits_sink (auto_in_3_e_bits_sink)
); // @[Nodes.scala:27:25]
Queue1_TLBundleA_a32d64s6k3z3c nodeOut_a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (_nodeOut_a_q_io_enq_ready),
.io_enq_valid (auto_in_0_a_valid),
.io_enq_bits_opcode (auto_in_0_a_bits_opcode),
.io_enq_bits_param (auto_in_0_a_bits_param),
.io_enq_bits_size (auto_in_0_a_bits_size),
.io_enq_bits_source (auto_in_0_a_bits_source),
.io_enq_bits_address (auto_in_0_a_bits_address),
.io_enq_bits_mask (auto_in_0_a_bits_mask),
.io_enq_bits_data (auto_in_0_a_bits_data),
.io_enq_bits_corrupt (auto_in_0_a_bits_corrupt),
.io_deq_ready (auto_out_0_a_ready),
.io_deq_valid (auto_out_0_a_valid),
.io_deq_bits_opcode (auto_out_0_a_bits_opcode),
.io_deq_bits_param (auto_out_0_a_bits_param),
.io_deq_bits_size (auto_out_0_a_bits_size),
.io_deq_bits_source (auto_out_0_a_bits_source),
.io_deq_bits_address (auto_out_0_a_bits_address),
.io_deq_bits_mask (auto_out_0_a_bits_mask),
.io_deq_bits_data (auto_out_0_a_bits_data),
.io_deq_bits_corrupt (auto_out_0_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue1_TLBundleD_a32d64s6k3z3c nodeIn_d_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (auto_out_0_d_ready),
.io_enq_valid (auto_out_0_d_valid),
.io_enq_bits_opcode (auto_out_0_d_bits_opcode),
.io_enq_bits_param (auto_out_0_d_bits_param),
.io_enq_bits_size (auto_out_0_d_bits_size),
.io_enq_bits_source (auto_out_0_d_bits_source),
.io_enq_bits_sink (auto_out_0_d_bits_sink),
.io_enq_bits_denied (auto_out_0_d_bits_denied),
.io_enq_bits_data (auto_out_0_d_bits_data),
.io_enq_bits_corrupt (auto_out_0_d_bits_corrupt),
.io_deq_ready (auto_in_0_d_ready),
.io_deq_valid (_nodeIn_d_q_io_deq_valid),
.io_deq_bits_opcode (_nodeIn_d_q_io_deq_bits_opcode),
.io_deq_bits_param (_nodeIn_d_q_io_deq_bits_param),
.io_deq_bits_size (_nodeIn_d_q_io_deq_bits_size),
.io_deq_bits_source (_nodeIn_d_q_io_deq_bits_source),
.io_deq_bits_sink (_nodeIn_d_q_io_deq_bits_sink),
.io_deq_bits_denied (_nodeIn_d_q_io_deq_bits_denied),
.io_deq_bits_data (auto_in_0_d_bits_data),
.io_deq_bits_corrupt (_nodeIn_d_q_io_deq_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue1_TLBundleA_a32d64s6k3z3c nodeOut_a_q_1 ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (_nodeOut_a_q_1_io_enq_ready),
.io_enq_valid (auto_in_1_a_valid),
.io_enq_bits_opcode (auto_in_1_a_bits_opcode),
.io_enq_bits_param (auto_in_1_a_bits_param),
.io_enq_bits_size (auto_in_1_a_bits_size),
.io_enq_bits_source (auto_in_1_a_bits_source),
.io_enq_bits_address (auto_in_1_a_bits_address),
.io_enq_bits_mask (auto_in_1_a_bits_mask),
.io_enq_bits_data (auto_in_1_a_bits_data),
.io_enq_bits_corrupt (auto_in_1_a_bits_corrupt),
.io_deq_ready (auto_out_1_a_ready),
.io_deq_valid (auto_out_1_a_valid),
.io_deq_bits_opcode (auto_out_1_a_bits_opcode),
.io_deq_bits_param (auto_out_1_a_bits_param),
.io_deq_bits_size (auto_out_1_a_bits_size),
.io_deq_bits_source (auto_out_1_a_bits_source),
.io_deq_bits_address (auto_out_1_a_bits_address),
.io_deq_bits_mask (auto_out_1_a_bits_mask),
.io_deq_bits_data (auto_out_1_a_bits_data),
.io_deq_bits_corrupt (auto_out_1_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue1_TLBundleD_a32d64s6k3z3c nodeIn_d_q_1 ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (auto_out_1_d_ready),
.io_enq_valid (auto_out_1_d_valid),
.io_enq_bits_opcode (auto_out_1_d_bits_opcode),
.io_enq_bits_param (auto_out_1_d_bits_param),
.io_enq_bits_size (auto_out_1_d_bits_size),
.io_enq_bits_source (auto_out_1_d_bits_source),
.io_enq_bits_sink (auto_out_1_d_bits_sink),
.io_enq_bits_denied (auto_out_1_d_bits_denied),
.io_enq_bits_data (auto_out_1_d_bits_data),
.io_enq_bits_corrupt (auto_out_1_d_bits_corrupt),
.io_deq_ready (auto_in_1_d_ready),
.io_deq_valid (_nodeIn_d_q_1_io_deq_valid),
.io_deq_bits_opcode (_nodeIn_d_q_1_io_deq_bits_opcode),
.io_deq_bits_param (_nodeIn_d_q_1_io_deq_bits_param),
.io_deq_bits_size (_nodeIn_d_q_1_io_deq_bits_size),
.io_deq_bits_source (_nodeIn_d_q_1_io_deq_bits_source),
.io_deq_bits_sink (_nodeIn_d_q_1_io_deq_bits_sink),
.io_deq_bits_denied (_nodeIn_d_q_1_io_deq_bits_denied),
.io_deq_bits_data (auto_in_1_d_bits_data),
.io_deq_bits_corrupt (_nodeIn_d_q_1_io_deq_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue1_TLBundleA_a32d64s6k3z3c nodeOut_a_q_2 ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (_nodeOut_a_q_2_io_enq_ready),
.io_enq_valid (auto_in_2_a_valid),
.io_enq_bits_opcode (auto_in_2_a_bits_opcode),
.io_enq_bits_param (auto_in_2_a_bits_param),
.io_enq_bits_size (auto_in_2_a_bits_size),
.io_enq_bits_source (auto_in_2_a_bits_source),
.io_enq_bits_address (auto_in_2_a_bits_address),
.io_enq_bits_mask (auto_in_2_a_bits_mask),
.io_enq_bits_data (auto_in_2_a_bits_data),
.io_enq_bits_corrupt (auto_in_2_a_bits_corrupt),
.io_deq_ready (auto_out_2_a_ready),
.io_deq_valid (auto_out_2_a_valid),
.io_deq_bits_opcode (auto_out_2_a_bits_opcode),
.io_deq_bits_param (auto_out_2_a_bits_param),
.io_deq_bits_size (auto_out_2_a_bits_size),
.io_deq_bits_source (auto_out_2_a_bits_source),
.io_deq_bits_address (auto_out_2_a_bits_address),
.io_deq_bits_mask (auto_out_2_a_bits_mask),
.io_deq_bits_data (auto_out_2_a_bits_data),
.io_deq_bits_corrupt (auto_out_2_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue1_TLBundleD_a32d64s6k3z3c nodeIn_d_q_2 ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (auto_out_2_d_ready),
.io_enq_valid (auto_out_2_d_valid),
.io_enq_bits_opcode (auto_out_2_d_bits_opcode),
.io_enq_bits_param (auto_out_2_d_bits_param),
.io_enq_bits_size (auto_out_2_d_bits_size),
.io_enq_bits_source (auto_out_2_d_bits_source),
.io_enq_bits_sink (auto_out_2_d_bits_sink),
.io_enq_bits_denied (auto_out_2_d_bits_denied),
.io_enq_bits_data (auto_out_2_d_bits_data),
.io_enq_bits_corrupt (auto_out_2_d_bits_corrupt),
.io_deq_ready (auto_in_2_d_ready),
.io_deq_valid (_nodeIn_d_q_2_io_deq_valid),
.io_deq_bits_opcode (_nodeIn_d_q_2_io_deq_bits_opcode),
.io_deq_bits_param (_nodeIn_d_q_2_io_deq_bits_param),
.io_deq_bits_size (_nodeIn_d_q_2_io_deq_bits_size),
.io_deq_bits_source (_nodeIn_d_q_2_io_deq_bits_source),
.io_deq_bits_sink (_nodeIn_d_q_2_io_deq_bits_sink),
.io_deq_bits_denied (_nodeIn_d_q_2_io_deq_bits_denied),
.io_deq_bits_data (auto_in_2_d_bits_data),
.io_deq_bits_corrupt (_nodeIn_d_q_2_io_deq_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue1_TLBundleA_a32d64s6k3z3c nodeOut_a_q_3 ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (_nodeOut_a_q_3_io_enq_ready),
.io_enq_valid (auto_in_3_a_valid),
.io_enq_bits_opcode (auto_in_3_a_bits_opcode),
.io_enq_bits_param (auto_in_3_a_bits_param),
.io_enq_bits_size (auto_in_3_a_bits_size),
.io_enq_bits_source (auto_in_3_a_bits_source),
.io_enq_bits_address (auto_in_3_a_bits_address),
.io_enq_bits_mask (auto_in_3_a_bits_mask),
.io_enq_bits_data (auto_in_3_a_bits_data),
.io_enq_bits_corrupt (auto_in_3_a_bits_corrupt),
.io_deq_ready (auto_out_3_a_ready),
.io_deq_valid (auto_out_3_a_valid),
.io_deq_bits_opcode (auto_out_3_a_bits_opcode),
.io_deq_bits_param (auto_out_3_a_bits_param),
.io_deq_bits_size (auto_out_3_a_bits_size),
.io_deq_bits_source (auto_out_3_a_bits_source),
.io_deq_bits_address (auto_out_3_a_bits_address),
.io_deq_bits_mask (auto_out_3_a_bits_mask),
.io_deq_bits_data (auto_out_3_a_bits_data),
.io_deq_bits_corrupt (auto_out_3_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue1_TLBundleD_a32d64s6k3z3c nodeIn_d_q_3 ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (auto_out_3_d_ready),
.io_enq_valid (auto_out_3_d_valid),
.io_enq_bits_opcode (auto_out_3_d_bits_opcode),
.io_enq_bits_param (auto_out_3_d_bits_param),
.io_enq_bits_size (auto_out_3_d_bits_size),
.io_enq_bits_source (auto_out_3_d_bits_source),
.io_enq_bits_sink (auto_out_3_d_bits_sink),
.io_enq_bits_denied (auto_out_3_d_bits_denied),
.io_enq_bits_data (auto_out_3_d_bits_data),
.io_enq_bits_corrupt (auto_out_3_d_bits_corrupt),
.io_deq_ready (auto_in_3_d_ready),
.io_deq_valid (_nodeIn_d_q_3_io_deq_valid),
.io_deq_bits_opcode (_nodeIn_d_q_3_io_deq_bits_opcode),
.io_deq_bits_param (_nodeIn_d_q_3_io_deq_bits_param),
.io_deq_bits_size (_nodeIn_d_q_3_io_deq_bits_size),
.io_deq_bits_source (_nodeIn_d_q_3_io_deq_bits_source),
.io_deq_bits_sink (_nodeIn_d_q_3_io_deq_bits_sink),
.io_deq_bits_denied (_nodeIn_d_q_3_io_deq_bits_denied),
.io_deq_bits_data (auto_in_3_d_bits_data),
.io_deq_bits_corrupt (_nodeIn_d_q_3_io_deq_bits_corrupt)
); // @[Decoupled.scala:362:21]
assign auto_in_3_a_ready = _nodeOut_a_q_3_io_enq_ready; // @[Decoupled.scala:362:21]
assign auto_in_3_b_valid = auto_out_3_b_valid; // @[Buffer.scala:40:9]
assign auto_in_3_b_bits_param = auto_out_3_b_bits_param; // @[Buffer.scala:40:9]
assign auto_in_3_b_bits_source = auto_out_3_b_bits_source; // @[Buffer.scala:40:9]
assign auto_in_3_b_bits_address = auto_out_3_b_bits_address; // @[Buffer.scala:40:9]
assign auto_in_3_c_ready = auto_out_3_c_ready; // @[Buffer.scala:40:9]
assign auto_in_3_d_valid = _nodeIn_d_q_3_io_deq_valid; // @[Decoupled.scala:362:21]
assign auto_in_3_d_bits_opcode = _nodeIn_d_q_3_io_deq_bits_opcode; // @[Decoupled.scala:362:21]
assign auto_in_3_d_bits_param = _nodeIn_d_q_3_io_deq_bits_param; // @[Decoupled.scala:362:21]
assign auto_in_3_d_bits_size = _nodeIn_d_q_3_io_deq_bits_size; // @[Decoupled.scala:362:21]
assign auto_in_3_d_bits_source = _nodeIn_d_q_3_io_deq_bits_source; // @[Decoupled.scala:362:21]
assign auto_in_3_d_bits_sink = _nodeIn_d_q_3_io_deq_bits_sink; // @[Decoupled.scala:362:21]
assign auto_in_3_d_bits_denied = _nodeIn_d_q_3_io_deq_bits_denied; // @[Decoupled.scala:362:21]
assign auto_in_3_d_bits_corrupt = _nodeIn_d_q_3_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
assign auto_in_2_a_ready = _nodeOut_a_q_2_io_enq_ready; // @[Decoupled.scala:362:21]
assign auto_in_2_b_valid = auto_out_2_b_valid; // @[Buffer.scala:40:9]
assign auto_in_2_b_bits_param = auto_out_2_b_bits_param; // @[Buffer.scala:40:9]
assign auto_in_2_b_bits_source = auto_out_2_b_bits_source; // @[Buffer.scala:40:9]
assign auto_in_2_b_bits_address = auto_out_2_b_bits_address; // @[Buffer.scala:40:9]
assign auto_in_2_c_ready = auto_out_2_c_ready; // @[Buffer.scala:40:9]
assign auto_in_2_d_valid = _nodeIn_d_q_2_io_deq_valid; // @[Decoupled.scala:362:21]
assign auto_in_2_d_bits_opcode = _nodeIn_d_q_2_io_deq_bits_opcode; // @[Decoupled.scala:362:21]
assign auto_in_2_d_bits_param = _nodeIn_d_q_2_io_deq_bits_param; // @[Decoupled.scala:362:21]
assign auto_in_2_d_bits_size = _nodeIn_d_q_2_io_deq_bits_size; // @[Decoupled.scala:362:21]
assign auto_in_2_d_bits_source = _nodeIn_d_q_2_io_deq_bits_source; // @[Decoupled.scala:362:21]
assign auto_in_2_d_bits_sink = _nodeIn_d_q_2_io_deq_bits_sink; // @[Decoupled.scala:362:21]
assign auto_in_2_d_bits_denied = _nodeIn_d_q_2_io_deq_bits_denied; // @[Decoupled.scala:362:21]
assign auto_in_2_d_bits_corrupt = _nodeIn_d_q_2_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
assign auto_in_1_a_ready = _nodeOut_a_q_1_io_enq_ready; // @[Decoupled.scala:362:21]
assign auto_in_1_b_valid = auto_out_1_b_valid; // @[Buffer.scala:40:9]
assign auto_in_1_b_bits_param = auto_out_1_b_bits_param; // @[Buffer.scala:40:9]
assign auto_in_1_b_bits_source = auto_out_1_b_bits_source; // @[Buffer.scala:40:9]
assign auto_in_1_b_bits_address = auto_out_1_b_bits_address; // @[Buffer.scala:40:9]
assign auto_in_1_c_ready = auto_out_1_c_ready; // @[Buffer.scala:40:9]
assign auto_in_1_d_valid = _nodeIn_d_q_1_io_deq_valid; // @[Decoupled.scala:362:21]
assign auto_in_1_d_bits_opcode = _nodeIn_d_q_1_io_deq_bits_opcode; // @[Decoupled.scala:362:21]
assign auto_in_1_d_bits_param = _nodeIn_d_q_1_io_deq_bits_param; // @[Decoupled.scala:362:21]
assign auto_in_1_d_bits_size = _nodeIn_d_q_1_io_deq_bits_size; // @[Decoupled.scala:362:21]
assign auto_in_1_d_bits_source = _nodeIn_d_q_1_io_deq_bits_source; // @[Decoupled.scala:362:21]
assign auto_in_1_d_bits_sink = _nodeIn_d_q_1_io_deq_bits_sink; // @[Decoupled.scala:362:21]
assign auto_in_1_d_bits_denied = _nodeIn_d_q_1_io_deq_bits_denied; // @[Decoupled.scala:362:21]
assign auto_in_1_d_bits_corrupt = _nodeIn_d_q_1_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
assign auto_in_0_a_ready = _nodeOut_a_q_io_enq_ready; // @[Decoupled.scala:362:21]
assign auto_in_0_b_valid = auto_out_0_b_valid; // @[Buffer.scala:40:9]
assign auto_in_0_b_bits_param = auto_out_0_b_bits_param; // @[Buffer.scala:40:9]
assign auto_in_0_b_bits_source = auto_out_0_b_bits_source; // @[Buffer.scala:40:9]
assign auto_in_0_b_bits_address = auto_out_0_b_bits_address; // @[Buffer.scala:40:9]
assign auto_in_0_c_ready = auto_out_0_c_ready; // @[Buffer.scala:40:9]
assign auto_in_0_d_valid = _nodeIn_d_q_io_deq_valid; // @[Decoupled.scala:362:21]
assign auto_in_0_d_bits_opcode = _nodeIn_d_q_io_deq_bits_opcode; // @[Decoupled.scala:362:21]
assign auto_in_0_d_bits_param = _nodeIn_d_q_io_deq_bits_param; // @[Decoupled.scala:362:21]
assign auto_in_0_d_bits_size = _nodeIn_d_q_io_deq_bits_size; // @[Decoupled.scala:362:21]
assign auto_in_0_d_bits_source = _nodeIn_d_q_io_deq_bits_source; // @[Decoupled.scala:362:21]
assign auto_in_0_d_bits_sink = _nodeIn_d_q_io_deq_bits_sink; // @[Decoupled.scala:362:21]
assign auto_in_0_d_bits_denied = _nodeIn_d_q_io_deq_bits_denied; // @[Decoupled.scala:362:21]
assign auto_in_0_d_bits_corrupt = _nodeIn_d_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
assign auto_out_3_b_ready = auto_in_3_b_ready; // @[Buffer.scala:40:9]
assign auto_out_3_c_valid = auto_in_3_c_valid; // @[Buffer.scala:40:9]
assign auto_out_3_c_bits_opcode = auto_in_3_c_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_3_c_bits_param = auto_in_3_c_bits_param; // @[Buffer.scala:40:9]
assign auto_out_3_c_bits_size = auto_in_3_c_bits_size; // @[Buffer.scala:40:9]
assign auto_out_3_c_bits_source = auto_in_3_c_bits_source; // @[Buffer.scala:40:9]
assign auto_out_3_c_bits_address = auto_in_3_c_bits_address; // @[Buffer.scala:40:9]
assign auto_out_3_c_bits_data = auto_in_3_c_bits_data; // @[Buffer.scala:40:9]
assign auto_out_3_c_bits_corrupt = auto_in_3_c_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_3_e_valid = auto_in_3_e_valid; // @[Buffer.scala:40:9]
assign auto_out_3_e_bits_sink = auto_in_3_e_bits_sink; // @[Buffer.scala:40:9]
assign auto_out_2_b_ready = auto_in_2_b_ready; // @[Buffer.scala:40:9]
assign auto_out_2_c_valid = auto_in_2_c_valid; // @[Buffer.scala:40:9]
assign auto_out_2_c_bits_opcode = auto_in_2_c_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_2_c_bits_param = auto_in_2_c_bits_param; // @[Buffer.scala:40:9]
assign auto_out_2_c_bits_size = auto_in_2_c_bits_size; // @[Buffer.scala:40:9]
assign auto_out_2_c_bits_source = auto_in_2_c_bits_source; // @[Buffer.scala:40:9]
assign auto_out_2_c_bits_address = auto_in_2_c_bits_address; // @[Buffer.scala:40:9]
assign auto_out_2_c_bits_data = auto_in_2_c_bits_data; // @[Buffer.scala:40:9]
assign auto_out_2_c_bits_corrupt = auto_in_2_c_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_2_e_valid = auto_in_2_e_valid; // @[Buffer.scala:40:9]
assign auto_out_2_e_bits_sink = auto_in_2_e_bits_sink; // @[Buffer.scala:40:9]
assign auto_out_1_b_ready = auto_in_1_b_ready; // @[Buffer.scala:40:9]
assign auto_out_1_c_valid = auto_in_1_c_valid; // @[Buffer.scala:40:9]
assign auto_out_1_c_bits_opcode = auto_in_1_c_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_1_c_bits_param = auto_in_1_c_bits_param; // @[Buffer.scala:40:9]
assign auto_out_1_c_bits_size = auto_in_1_c_bits_size; // @[Buffer.scala:40:9]
assign auto_out_1_c_bits_source = auto_in_1_c_bits_source; // @[Buffer.scala:40:9]
assign auto_out_1_c_bits_address = auto_in_1_c_bits_address; // @[Buffer.scala:40:9]
assign auto_out_1_c_bits_data = auto_in_1_c_bits_data; // @[Buffer.scala:40:9]
assign auto_out_1_c_bits_corrupt = auto_in_1_c_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_1_e_valid = auto_in_1_e_valid; // @[Buffer.scala:40:9]
assign auto_out_1_e_bits_sink = auto_in_1_e_bits_sink; // @[Buffer.scala:40:9]
assign auto_out_0_b_ready = auto_in_0_b_ready; // @[Buffer.scala:40:9]
assign auto_out_0_c_valid = auto_in_0_c_valid; // @[Buffer.scala:40:9]
assign auto_out_0_c_bits_opcode = auto_in_0_c_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_0_c_bits_param = auto_in_0_c_bits_param; // @[Buffer.scala:40:9]
assign auto_out_0_c_bits_size = auto_in_0_c_bits_size; // @[Buffer.scala:40:9]
assign auto_out_0_c_bits_source = auto_in_0_c_bits_source; // @[Buffer.scala:40:9]
assign auto_out_0_c_bits_address = auto_in_0_c_bits_address; // @[Buffer.scala:40:9]
assign auto_out_0_c_bits_data = auto_in_0_c_bits_data; // @[Buffer.scala:40:9]
assign auto_out_0_c_bits_corrupt = auto_in_0_c_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_0_e_valid = auto_in_0_e_valid; // @[Buffer.scala:40:9]
assign auto_out_0_e_bits_sink = auto_in_0_e_bits_sink; // @[Buffer.scala:40:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File LCG.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.Cat
/** A 16-bit pseudo-random generator based on a linear congruential
 * generator (LCG). The state is stored in an uninitialised register.
 * When using the C++ backend, it is straightforward to arrange a
 * random initial value for each uninitialised register, effectively
 * seeding each LCG16 instance with a different seed.
 */
class LCG16 extends Module {
  val io = IO(new Bundle {
    val out = Output(UInt(16.W))  // pseudo-random output (middle bits of the state)
    val inc = Input(Bool())       // advance the generator one step when asserted
  })
  // 32-bit LCG state. Deliberately has no reset/init value, so each instance
  // can pick up a distinct random initial value from the simulator backend.
  val state = Reg(UInt(32.W))
  when (io.inc) {
    // Classic LCG step: state := state * 1103515245 + 12345 (mod 2^32),
    // the well-known glibc rand() multiplier/increment pair.
    state := state * 1103515245.U(32.W) + 12345.U(32.W)
  }
  // Emit bits 30..15: the low-order bits of an LCG have short periods, so the
  // middle of the state word is used as the 16-bit output.
  io.out := state(30, 15)
}
/** An n-bit pseudo-random generator made from many instances of a
 * 16-bit LCG. Parameter 'w' must be larger than 0.
 */
class LCG(val w: Int) extends Module {
  val io = IO(new Bundle {
    val out = Output(UInt(w.W))  // w-bit pseudo-random output
    val inc = Input(Bool())      // advance every underlying LCG16 when asserted
  })
  require(w > 0)
  // Number of 16-bit generators needed to cover w bits (ceiling division).
  val numLCG16s : Int = (w+15)/16
  // All instances share the same increment enable.
  val outs = Seq.fill(numLCG16s) { LCG16(io.inc) }
  // Concatenation may be wider than w; the := connection keeps the low w bits.
  io.out := Cat(outs)
}
object LCG16 {
  /** Instantiate an [[LCG16]] and return its 16-bit output.
    * @param inc increment enable; defaults to true.B (advance every cycle)
    */
  def apply(inc: Bool = true.B): UInt = {
    val lcg = Module(new LCG16)
    lcg.io.inc := inc
    lcg.io.out
  }
}
object LCG {
  /** Instantiate a w-bit [[LCG]] and return its output.
    * @param w   output width in bits (must be > 0)
    * @param inc increment enable; defaults to true.B (advance every cycle)
    */
  def apply(w: Int, inc: Bool = true.B): UInt = {
    val lcg = Module(new LCG(w))
    lcg.io.inc := inc
    lcg.io.out
  }
}
| // Generated from the LCG(w) Chisel module with w = 4: a single 16-bit LCG core
// (LCG16_7) is instantiated and only its low 4 bits are exposed. The increment
// enable is tied high, so the generator advances on every clock edge.
module LCG_5( // @[LCG.scala:30:7]
  input        clock, // @[LCG.scala:30:7]
  input        reset, // @[LCG.scala:30:7]
  output [3:0] io_out // @[LCG.scala:31:14]
);

  wire [15:0] _outs_lcg_io_out; // @[LCG.scala:43:21]
  wire        io_inc = 1'h1; // @[LCG.scala:30:7, :31:14, :43:21]  // increment always enabled
  wire [3:0]  io_out_0; // @[LCG.scala:30:7]
  // Truncate the 16-bit pseudo-random value to the requested 4-bit width.
  assign io_out_0 = _outs_lcg_io_out[3:0]; // @[LCG.scala:30:7, :38:10, :43:21]
  LCG16_7 outs_lcg ( // @[LCG.scala:43:21]
    .clock  (clock),
    .reset  (reset),
    .io_out (_outs_lcg_io_out)
  ); // @[LCG.scala:43:21]
  assign io_out = io_out_0; // @[LCG.scala:30:7]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
  /** Build an n-stage shift register whose stages reset to `init`.
    * @param in   signal entering the first stage
    * @param n    number of register stages (n == 0 returns `in` unchanged)
    * @param init reset value applied to every stage
    * @param name optional base name; stages are suggested names "name_i"
    * @return the output of the final stage
    */
  def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
    (0 until n).foldRight(in) {
      case (i, next) => {
        val r = RegNext(next, init)
        // Give each stage a predictable name so backend flows can find it.
        name.foreach { na => r.suggestName(s"${na}_${i}") }
        r
      }
    }
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
/** Common shell for pipeline-register modules: a w-bit data input `d` and a
  * w-bit data output `q`. Concrete subclasses define the register chain.
  */
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(w.W))   // data in
    val q = Output(UInt(w.W))  // data out
  }
  )
}
object AbstractPipelineReg {
  /** Instantiate `gen`, drive it with `in` (converted to UInt), and return the
    * output converted back to `in`'s type.
    * @param name optional suggested instance name for the generated module
    */
  def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
    val chain = Module(gen)
    name.foreach{ chain.suggestName(_) }
    chain.io.d := in.asUInt
    chain.io.q.asTypeOf(in)
  }
}
/** A w-bit by `depth`-deep shift register built from asynchronously reset
  * register vectors, one per stage. Each stage resets to `init`.
  */
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
  require(depth > 0, "Depth must be greater than 0.")

  override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"

  // One AsyncResetRegVec per stage, each carrying a predictable name.
  val chain = List.tabulate(depth) { i =>
    Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
  }

  // Data enters at the LAST element of the chain and shifts toward the head.
  chain.last.io.d := io.d
  chain.last.io.en := true.B

  // Each earlier stage samples its successor's output; enables are tied high
  // so the chain shifts every cycle.
  (chain.init zip chain.tail).foreach { case (sink, source) =>
    sink.io.d := source.io.q
    sink.io.en := true.B
  }
  io.q := chain.head.io.q
}
object AsyncResetShiftReg {
  /** Pass `in` through a `depth`-stage async-reset shift register.
    * @param init  per-stage reset value, as an Int bit pattern
    * @param name  optional suggested name for the generated module
    */
  def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
    AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)

  // Convenience overloads: default init of 0, or init given as a literal of T.
  def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
    apply(in, depth, 0, name)

  def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
    apply(in, depth, init.litValue.toInt, name)

  def apply [T <: Data](in: T, depth: Int, init: T): T =
    apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
/** Configuration for an asynchronous (clock-crossing) queue.
  * @param depth  number of entries; must be a power of two
  * @param sync   synchronizer chain length for the crossing indices (>= 2)
  */
case class AsyncQueueParams(
  depth:  Int     = 8,
  sync:   Int     = 3,
  safe:   Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
  narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
  require (depth > 0 && isPow2(depth))
  require (sync >= 2)

  // Width of a queue index (without the extra wrap bit).
  val bits = log2Ceil(depth)
  // Number of data wires crossing the boundary: 1 when narrow, else all entries.
  val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
  // When there is only one entry, we don't need narrow.
  /** Parameters for a single-entry async queue (narrow is forced off). */
  def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
/** Handshake signals used by the "safe" AsyncQueue variant so each side can
  * observe the other side's reset state and re-stabilize the crossing.
  * Directions are from the source's point of view.
  */
class AsyncBundleSafety extends Bundle {
  val ridx_valid     = Input (Bool())  // sink's read index is valid
  val widx_valid     = Output(Bool())  // source's write index is valid
  val source_reset_n = Output(Bool())  // source side is out of reset (active low)
  val sink_reset_n   = Input (Bool())  // sink side is out of reset (active low)
}
/** The wires crossing the asynchronous boundary between an AsyncQueueSource
  * and an AsyncQueueSink. Directions are from the source's point of view.
  */
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
  // Data-path synchronization
  val mem   = Output(Vec(params.wires, gen))       // queue storage (or one muxed entry when narrow)
  val ridx  = Input (UInt((params.bits+1).W))      // sink's Gray-coded read index (+ wrap bit)
  val widx  = Output(UInt((params.bits+1).W))      // source's Gray-coded write index (+ wrap bit)
  val index = params.narrow.option(Input(UInt(params.bits.W)))  // read select, only when narrow

  // Signals used to self-stabilize a safe AsyncQueue
  val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
  /** A Gray-coded counter: successive values differ in exactly one bit, which
    * makes the count safe to synchronize across a clock-domain crossing.
    * @param increment advance the counter when true
    * @param clear     synchronously reset the binary count to zero
    * @param name      suggested name for the underlying binary register
    */
  def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
    val incremented = Wire(UInt(bits.W))
    val binary = RegNext(next=incremented, init=0.U).suggestName(name)
    incremented := Mux(clear, 0.U, binary + increment.asUInt)
    // Binary-to-Gray conversion of the *next* count (combinational output).
    incremented ^ (incremented >> 1)
  }
}
/** Synchronizes a single valid bit into this module's clock domain through a
  * `sync`-deep async-reset synchronizer chain. RawModule: clock and reset are
  * explicit IO so the instantiator wires in the destination domain's signals.
  * @param desc name suggested for the synchronizer registers
  */
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
  val io = IO(new Bundle {
    val in = Input(Bool())
    val out = Output(Bool())
  })
  val clock = IO(Input(Clock()))
  val reset = IO(Input(AsyncReset()))
  withClockAndReset(clock, reset){
    io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
  }
}
/** Source (enqueue) half of an asynchronous queue. Lives entirely in the
  * source clock domain; the `io.async` bundle crosses to the sink domain.
  */
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
  override def desiredName = s"AsyncQueueSource_${gen.typeName}"

  val io = IO(new Bundle {
    // These come from the source domain
    val enq = Flipped(Decoupled(gen))
    // These cross to the sink clock domain
    val async = new AsyncBundle(gen, params)
  })

  val bits = params.bits
  // Overridden below by the safety logic when params.safe is set.
  val sink_ready = WireInit(true.B)
  val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
  // Gray-coded write index (with wrap bit), asynchronously reset; cleared if
  // the sink side goes away so both sides restart in agreement.
  val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
  // Sink's read index, synchronized into this domain.
  val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
  // Full detection in Gray code: full when widx equals ridx with the top two
  // bits inverted (the Gray-code equivalent of "indices differ by depth").
  val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)

  // Convert the Gray write index back to a binary memory address.
  val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
  when (io.enq.fire) { mem(index) := io.enq.bits }

  // Registered ready cuts the combinational path from ridx to enq.ready.
  val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
  io.enq.ready := ready_reg && sink_ready

  // Register widx before it crosses so the sink sees a glitch-free Gray value.
  val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
  io.async.widx := widx_reg

  io.async.index match {
    case Some(index) => io.async.mem(0) := mem(index)  // narrow: mux on this side
    case None => io.async.mem := mem                   // wide: expose all entries
  }

  io.async.safe.foreach { sio =>
    // Valid/reset handshake that re-stabilizes the crossing when either side
    // resets. The source-side synchronizers are also reset when the SINK
    // resets, so widx_valid drops until both sides are back up.
    val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
    val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))

    val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
    val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
    source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
    source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
    sink_extend   .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
    sink_valid    .reset := reset.asAsyncReset

    source_valid_0.clock := clock
    source_valid_1.clock := clock
    sink_extend   .clock := clock
    sink_valid    .clock := clock

    // Two back-to-back stages double the synchronizer depth for widx_valid.
    source_valid_0.io.in := true.B
    source_valid_1.io.in := source_valid_0.io.out
    sio.widx_valid := source_valid_1.io.out
    sink_extend.io.in := sio.ridx_valid
    sink_valid.io.in := sink_extend.io.out
    sink_ready := sink_valid.io.out
    sio.source_reset_n := !reset.asBool

    // Assert that if there is stuff in the queue, then reset cannot happen
    //  Impossible to write because dequeue can occur on the receiving side,
    //  then reset allowed to happen, but write side cannot know that dequeue
    //  occurred.
    // TODO: write some sort of sanity check assertion for users
    // that denote don't reset when there is activity
    //    assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
    //    assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
  }
}
/** Sink (dequeue) half of an asynchronous FIFO queue.
  *
  * Lives entirely in the consumer clock domain: it synchronizes the source
  * domain's gray-coded write index, compares it with a local gray-coded read
  * index to detect non-emptiness, and presents entries on `io.deq`.
  *
  * @param gen    payload type stored in the queue
  * @param params queue depth, synchronizer stages, and safe-reset options
  */
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
  override def desiredName = s"AsyncQueueSink_${gen.typeName}"
  val io = IO(new Bundle {
    // These come from the sink domain
    val deq = Decoupled(gen)
    // These cross to the source clock domain
    val async = Flipped(new AsyncBundle(gen, params))
  })
  val bits = params.bits
  // Deasserted by the safe-reset handshake below while the source side is unsafe.
  val source_ready = WireInit(true.B)
  // Gray-coded read pointer; asynchronously reset so it holds a defined value
  // even before this domain's clock is running.
  val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
  // Write pointer from the source domain, taken through params.sync synchronizer stages.
  val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
  // Queue is non-empty exactly when the gray pointers differ.
  val valid = source_ready && ridx =/= widx
  // The mux is safe because timing analysis ensures ridx has reached the register
  // On an ASIC, changes to the unread location cannot affect the selected value
  // On an FPGA, only one input changes at a time => mem updates don't cause glitches
  // The register only latches when the selected value is not being written
  // Convert the gray read pointer back to a binary memory index.
  val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
  io.async.index.foreach { _ := index }
  // This register does not NEED to be reset, as its contents will not
  // be considered unless the asynchronously reset deq valid register is set.
  // It is possible that bits latches when the source domain is reset / has power cut
  // This is safe, because isolation gates brought mem low before the zeroed widx reached us
  val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
  io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
  val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
  io.deq.valid := valid_reg && source_ready
  // Register the read pointer before exporting it across the clock boundary.
  val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
  io.async.ridx := ridx_reg
  // Optional safe-reset handshake: report our reset state to the source and
  // gate dequeue validity until the source reports itself out of reset.
  io.async.safe.foreach { sio =>
    val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
    val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
    val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
    val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
    sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
    sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
    source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
    source_valid .reset := reset.asAsyncReset
    sink_valid_0 .clock := clock
    sink_valid_1 .clock := clock
    source_extend.clock := clock
    source_valid .clock := clock
    sink_valid_0.io.in := true.B
    sink_valid_1.io.in := sink_valid_0.io.out
    sio.ridx_valid := sink_valid_1.io.out
    source_extend.io.in := sio.widx_valid
    source_valid.io.in := source_extend.io.out
    source_ready := source_valid.io.out
    sio.sink_reset_n := !reset.asBool
    // TODO: write some sort of sanity check assertion for users
    // that denote don't reset when there is activity
    //
    // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
    // val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
    // val reset_rise = !reset_and_extend_prev && reset_and_extend
    // val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
    // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
  }
}
/** Converts the far side of an [[AsyncBundle]] into an ordinary [[DecoupledIO]]
  * by instantiating an [[AsyncQueueSink]] in the current (sink) clock domain.
  */
object FromAsyncBundle
{
  // Sometimes it makes sense for the sink to have different sync than the source
  def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
  /** @param sync number of synchronizer stages to use on the sink side. */
  def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
    val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
    sink.io.async <> x
    sink.io.deq
  }
}
/** Wraps an ordinary [[ReadyValidIO]] producer into an [[AsyncBundle]] by
  * instantiating an [[AsyncQueueSource]] in the current (source) clock domain.
  */
object ToAsyncBundle
{
  def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
    val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
    source.io.enq <> x
    source.io.async
  }
}
/** A complete asynchronous FIFO crossing: an [[AsyncQueueSource]] clocked by
  * `io.enq_clock`/`io.enq_reset` wired to an [[AsyncQueueSink]] clocked by
  * `io.deq_clock`/`io.deq_reset`.
  */
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
  val io = IO(new CrossingIO(gen))
  // Each half is elaborated under its own clock/reset pair.
  val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
  val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
  source.io.enq <> io.enq
  io.deq <> sink.io.deq
  sink.io.async <> source.io.async
}
// Generated Verilog for the Chisel AsyncValidSync (AsyncQueue.scala): a
// single-bit level synchronizer that passes io_in through an
// asynchronously-reset synchronizer shift register to io_out.
| module AsyncValidSync_133( // @[AsyncQueue.scala:58:7]
  input io_in, // @[AsyncQueue.scala:59:14]
  output io_out, // @[AsyncQueue.scala:59:14]
  input clock, // @[AsyncQueue.scala:63:17]
  input reset // @[AsyncQueue.scala:64:17]
);
  wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
  wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
  wire io_out_0; // @[AsyncQueue.scala:58:7]
  assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
  // 3-stage (w1_d3) synchronizer instance producing the output.
  AsyncResetSynchronizerShiftReg_w1_d3_i0_154 io_out_sink_valid_1 ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (reset),
    .io_d (io_in_0), // @[AsyncQueue.scala:58:7]
    .io_q (_io_out_WIRE)
  ); // @[ShiftReg.scala:45:23]
  assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
/** Diplomatic adapter node for [[TLBuffer]].
  *
  * Advertises the buffers' added latency in the diplomatic parameters: the
  * b/c channel buffers add latency seen by clients, while the a/d channel
  * buffers add latency seen by managers.
  */
class TLBufferNode (
  a: BufferParams,
  b: BufferParams,
  c: BufferParams,
  d: BufferParams,
  e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
  clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
  managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
  override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
  // The node is a pure pass-through when every channel is unbuffered.
  override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
/** Inserts register/queue buffering on each TileLink channel.
  *
  * Each of the five channels (a, b, c, d, e) gets its own [[BufferParams]],
  * allowing e.g. deeper buffering on the request path than the response path.
  */
class TLBuffer(
  a: BufferParams,
  b: BufferParams,
  c: BufferParams,
  d: BufferParams,
  e: BufferParams)(implicit p: Parameters) extends LazyModule
{
  /** Buffer the a/c/e channels with `ace` and the b/d channels with `bd`. */
  def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
  /** Buffer all five channels identically. */
  def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
  def this()(implicit p: Parameters) = this(BufferParams.default)
  val node = new TLBufferNode(a, b, c, d, e)
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) {
    def headBundle = node.out.head._2.bundle
    override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
    (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
      // a and d are always present; apply their buffers.
      out.a <> a(in .a)
      in .d <> d(out.d)
      if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
        // TL-C edge: buffer the b/c/e channels too.
        in .b <> b(out.b)
        out.c <> c(in .c)
        out.e <> e(in .e)
      } else {
        // TL-UL/UH edge: tie off the unused b/c/e channels.
        in.b.valid := false.B
        in.c.ready := true.B
        in.e.ready := true.B
        out.b.ready := true.B
        out.c.valid := false.B
        out.e.valid := false.B
      }
    }
  }
}
/** Convenience factories returning the node of a freshly created [[TLBuffer]]. */
object TLBuffer
{
  def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
  def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
  def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
  def apply(
    a: BufferParams,
    b: BufferParams,
    c: BufferParams,
    d: BufferParams,
    e: BufferParams)(implicit p: Parameters): TLNode =
  {
    val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
    buffer.node
  }
  /** Create `depth` default buffers, optionally naming them `name_0 .. name_{depth-1}`. */
  def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
    val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
    name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
    buffers.map(_.node)
  }
  /** Like [[chain]], but pre-connected into a single node (identity node when depth == 0). */
  def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
    chain(depth, name)
      .reduceLeftOption(_ :*=* _)
      .getOrElse(TLNameNode("no_buffer"))
  }
}
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
/** Config field selecting the monitor implementation attached to TL edges. */
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
/** Diplomacy node implementation for standard (synchronous) TileLink. */
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
  def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
  def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
  def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
  def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
  // Render TL edges in black, labeled with the data bus width in bits.
  def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
  // Attach a protocol monitor (chosen via TLMonitorBuilder) to the edge.
  override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
    val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
    monitor.io.in := bundle
  }
  // Record this node on the nodePath of every client/manager passing through it.
  override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
    pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
  override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
    pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
/** A TileLink master: originates requests (pure source of edges). */
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
/** A TileLink slave: terminates requests (pure sink of edges). */
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
/** 1:1 node that may transform the parameters flowing through it. */
case class TLAdapterNode(
  clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
  managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
  implicit valName: ValName)
  extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
/** N:M node whose parameter functions see all ports at once. */
case class TLJunctionNode(
  clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
  managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
  implicit valName: ValName)
  extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
/** Pass-through node; useful for naming and graph structure. */
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
/** Factory for [[TLIdentityNode]]s carrying an explicit diagnostic name. */
object TLNameNode {
  /** Create an identity node tagged with an explicit [[ValName]]. */
  def apply(name: ValName) = TLIdentityNode()(name)
  /** Create an identity node from an optional name, defaulting when absent. */
  def apply(name: Option[String]): TLIdentityNode = {
    val resolved = name.getOrElse("with_no_name")
    apply(ValName(resolved))
  }
  /** Create an identity node from a plain string name. */
  def apply(name: String): TLIdentityNode = apply(Option(name))
}
/** Node that disappears from the final graph; used for temporary staging. */
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
  def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
/** N:M node that funnels all ports through a single parameter function. */
case class TLNexusNode(
  clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
  managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
  implicit valName: ValName)
  extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
/** Base class for nodes with user-defined resolution behavior. */
abstract class TLCustomNode(implicit valName: ValName)
  extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
/** Node implementation for TileLink carried over an asynchronous FIFO crossing. */
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
  def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
  def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
  // Render async edges in red, labeled with the queue depth.
  def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
  // Record this node on the nodePath of the wrapped base parameters.
  override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
    pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
  override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
    pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
  clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
  managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
  implicit valName: ValName)
  extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
  def apply(name: ValName) = TLAsyncIdentityNode()(name)
  def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
  def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
/** Synchronous-to-async boundary; `sync` overrides the sink's synchronizer depth. */
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
  extends MixedAdapterNode(TLImp, TLAsyncImp)(
    dFn = { p => TLAsyncClientPortParameters(p) },
    uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
/** Async-to-synchronous boundary carrying the queue parameters. */
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
  extends MixedAdapterNode(TLAsyncImp, TLImp)(
    dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
    uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
/** Node implementation for TileLink carried over a rational clock crossing. */
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
  def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
  def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
  // Render rational edges in green.
  def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
  // Record this node on the nodePath of the wrapped base parameters.
  override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
    pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
  override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
    pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
  clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
  managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
  implicit valName: ValName)
  extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
  def apply(name: ValName) = TLRationalIdentityNode()(name)
  def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
  def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
  extends MixedAdapterNode(TLImp, TLRationalImp)(
    dFn = { p => TLRationalClientPortParameters(p) },
    uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
/** Crossing exit; `direction` describes the clock-ratio relationship. */
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
  extends MixedAdapterNode(TLRationalImp, TLImp)(
    dFn = { p => p.base.v1copy(minLatency = 1) },
    uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
/** Node implementation for TileLink carried over a credit-based crossing. */
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
  def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
  def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
  // Render credited edges in yellow, labeled with the credit delay.
  def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
  // Record this node on the nodePath of the wrapped base parameters.
  override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
    pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
  override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
    pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
  clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
  managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
  implicit valName: ValName)
  extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
  def apply(name: ValName) = TLCreditedIdentityNode()(name)
  def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
  def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
  extends MixedAdapterNode(TLImp, TLCreditedImp)(
    dFn = { p => TLCreditedClientPortParameters(delay, p) },
    uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
  extends MixedAdapterNode(TLCreditedImp, TLImp)(
    dFn = { p => p.base.v1copy(minLatency = 1) },
    uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
  /** [[LazyModule]] that contains this instance. */
  val wrapper: LazyModule
  /** IOs that will be automatically "punched" for this instance. */
  val auto: AutoBundle
  /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
  protected[diplomacy] val dangles: Seq[Dangle]
  // [[wrapper.module]] had better not be accessed while LazyModules are still being built!
  require(
    LazyModule.scope.isEmpty,
    s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
  )
  /** Set module name. Defaults to the containing LazyModule's desiredName. */
  override def desiredName: String = wrapper.desiredName
  suggestName(wrapper.suggestedName)
  /** [[Parameters]] for chisel [[Module]]s. */
  implicit val p: Parameters = wrapper.p
  /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
    * submodules.
    */
  protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
    // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
    // 2. return [[Dangle]]s from each module.
    val childDangles = wrapper.children.reverse.flatMap { c =>
      implicit val sourceInfo: SourceInfo = c.info
      c.cloneProto.map { cp =>
        // If the child is a clone, then recursively set cloneProto of its children as well
        def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
          require(bases.size == clones.size)
          (bases.zip(clones)).map { case (l, r) =>
            require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
            l.cloneProto = Some(r)
            assignCloneProtos(l.children, r.children)
          }
        }
        assignCloneProtos(c.children, cp.children)
        // Clone the child module as a record, and get its [[AutoBundle]]
        val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
        val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
        // Get the empty [[Dangle]]'s of the cloned child
        val rawDangles = c.cloneDangles()
        require(rawDangles.size == clonedAuto.elements.size)
        // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
        val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
        dangles
      }.getOrElse {
        // For non-clones, instantiate the child module
        val mod = try {
          Module(c.module)
        } catch {
          case e: ChiselException => {
            // Re-throw with context so the failing child is identifiable.
            println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
            throw e
          }
        }
        mod.dangles
      }
    }
    // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
    // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
    val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
    // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
    val allDangles = nodeDangles ++ childDangles
    // Group [[allDangles]] by their [[source]].
    val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
    // For each [[source]] set of [[Dangle]]s of size 2, ensure that these
    // can be connected as a source-sink pair (have opposite flipped value).
    // Make the connection and mark them as [[done]].
    // NOTE: the `case _ => None` arm adds the value None (never a HalfEdge)
    // to the set, so unpaired groups never match a real dangle source below.
    val done = Set() ++ pairing.values.filter(_.size == 2).map {
      case Seq(a, b) =>
        require(a.flipped != b.flipped)
        // @todo <> in chisel3 makes directionless connection.
        if (a.flipped) {
          a.data <> b.data
        } else {
          b.data <> a.data
        }
        a.source
      case _ => None
    }
    // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
    val forward = allDangles.filter(d => !done(d.source))
    // Generate [[AutoBundle]] IO from [[forward]].
    val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
    // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
    val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
      if (d.flipped) {
        d.data <> io
      } else {
        io <> d.data
      }
      d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
    }
    // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
    wrapper.inModuleBody.reverse.foreach {
      _()
    }
    if (wrapper.shouldBeInlined) {
      chisel3.experimental.annotate(new ChiselAnnotation {
        def toFirrtl = InlineAnnotation(toNamed)
      })
    }
    // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
    (auto, dangles)
  }
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
  /** Instantiate hardware of this `Module` (children elaborate under this Module's implicit clock/reset). */
  val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
  // These wires are the default clock+reset for all LazyModule children.
  // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
  // [[LazyRawModuleImp]] children.
  // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
  /** drive clock explicitly. */
  val childClock: Clock = Wire(Clock())
  /** drive reset explicitly. */
  val childReset: Reset = Wire(Reset())
  // the default is that these are disabled: constant-low clock, don't-care reset
  childClock := false.B.asClock
  childReset := chisel3.DontCare
  /** Override to true to elaborate children under [[childClock]]/[[childReset]]. */
  def provideImplicitClockToLazyChildren: Boolean = false
  val (auto, dangles) =
    if (provideImplicitClockToLazyChildren) {
      withClockAndReset(childClock, childReset) { instantiate() }
    } else {
      instantiate()
    }
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
  /** Order lexicographically: first by node serial number, then by port index. */
  def compare(that: HalfEdge): Int = {
    val bySerial = this.serial.compare(that.serial)
    if (bySerial != 0) bySerial else this.index.compare(that.index)
  }
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
  // Unwrap the hardware Data; only valid once dataOpt has been populated
  // (it can start out empty for dangles belonging to a cloned module).
  def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
/** Pairs a node's inward (`EI`) and outward (`EO`) edge parameters. */
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
  *
  * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
  * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
  * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
  * [[LazyModule]].
  */
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extends from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extends from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: source is process by a function and generate pass to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
/** A node in the diplomacy graph whose inward and outward protocols may differ.
  *
  * `MixedNode` is the workhorse base class for all diplomatic nodes: it resolves
  * star (`:*=` / `:=*` / `:*=*`) cardinalities, propagates parameters downward and
  * upward, materializes hardware [[Bundle]] wires for each negotiated edge, and
  * produces [[Dangle]]s used to punch ports through module hierarchy.
  *
  * @param inner protocol implementation for the inward (sink-facing) side
  * @param outer protocol implementation for the outward (source-facing) side
  */
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
  val inner: InwardNodeImp[DI, UI, EI, BI],
  val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
  implicit valName: ValName)
    extends BaseNode
    with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
    with InwardNode[DI, UI, BI]
    with OutwardNode[DO, UO, BO] {
  // Generate a [[NodeHandle]] with inward and outward node are both this node.
  val inward = this
  val outward = this

  /** Debug info of nodes binding. */
  def bindingInfo: String = s"""$iBindingInfo
                               |$oBindingInfo
                               |""".stripMargin

  /** Debug info of ports connecting. */
  def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
                                      |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
                                      |""".stripMargin

  /** Debug info of parameters propagations. */
  def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
                                  |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
                                  |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
                                  |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
                                  |""".stripMargin

  /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
    * [[MixedNode.iPortMapping]].
    *
    * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
    * stars and outward stars.
    *
    * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
    * of node.
    *
    * @param iKnown
    *   Number of known-size ([[BIND_ONCE]]) input bindings.
    * @param oKnown
    *   Number of known-size ([[BIND_ONCE]]) output bindings.
    * @param iStar
    *   Number of unknown size ([[BIND_STAR]]) input bindings.
    * @param oStar
    *   Number of unknown size ([[BIND_STAR]]) output bindings.
    * @return
    *   A Tuple of the resolved number of input and output connections.
    */
  protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)

  /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
    * ports.
    *
    * @param n
    *   The size of the output sequence to generate.
    * @param p
    *   Sequence of downward-flowing input parameters of this node.
    * @return
    *   A `n`-sized sequence of downward-flowing output edge parameters.
    */
  protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]

  /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
    *
    * @param n
    *   Size of the output sequence.
    * @param p
    *   Upward-flowing output edge parameters.
    * @return
    *   A n-sized sequence of upward-flowing input edge parameters.
    */
  protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]

  /** @return
    *   The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
    *   [[BIND_STAR]].
    */
  protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)

  /** @return
    *   The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
    *   output bindings bound with [[BIND_STAR]].
    */
  protected[diplomacy] lazy val sourceCard: Int =
    iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)

  /** @return list of nodes involved in flex bindings with this node. */
  protected[diplomacy] lazy val flexes: Seq[BaseNode] =
    oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)

  /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
    * greedily taking up the remaining connections.
    *
    * @return
    *   A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
    *   value is not relevant.
    */
  protected[diplomacy] lazy val flexOffset: Int = {
    /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
      * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
      * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
      * each node in the current set and decide whether they should be added to the set or not.
      *
      * @return
      *   the mapping of [[BaseNode]] indexed by their serial numbers.
      */
    def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
      if (visited.contains(v.serial) || !v.flexibleArityDirection) {
        visited
      } else {
        v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
      }
    }

    /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
      *
      * @example
      *   {{{
      *   a :*=* b :*=* c
      *   d :*=* b
      *   e :*=* f
      *   }}}
      *
      * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
      */
    val flexSet = DFS(this, Map()).values

    /** The total number of :*= operators where we're on the left. */
    val allSink = flexSet.map(_.sinkCard).sum

    /** The total number of :=* operators used when we're on the right. */
    val allSource = flexSet.map(_.sourceCard).sum

    // A flex group must be driven from exactly one direction; mixed directions
    // would make cardinality inference ambiguous, so reject it here.
    require(
      allSink == 0 || allSource == 0,
      s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
    )
    allSink - allSource
  }

  /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
  protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
    if (flexibleArityDirection) flexOffset
    else if (n.flexibleArityDirection) n.flexOffset
    else 0
  }

  /** For a node which is connected between two nodes, select the one that will influence the direction of the flex
    * resolution.
    */
  protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
    val dir = edgeArityDirection(n)
    if (dir < 0) l
    else if (dir > 0) r
    else 1
  }

  /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
  // NOTE: this is a one-shot latch set during evaluation of the lazy port-mapping
  // tuple below; re-entry while it is set means the star-resolution recursion has
  // looped back to this node, i.e. the user's graph contains a cardinality cycle.
  private var starCycleGuard = false

  /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star"
    * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct
    * edge parameters and later build up correct bundle connections.
    *
    * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
    * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
    * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
    * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
    */
  protected[diplomacy] lazy val (
    oPortMapping: Seq[(Int, Int)],
    iPortMapping: Seq[(Int, Int)],
    oStar: Int,
    iStar: Int
  ) = {
    try {
      if (starCycleGuard) throw StarCycleException()
      starCycleGuard = true
      // For a given node N...
      //   Number of foo :=* N
      // + Number of bar :=* foo :*=* N
      val oStars = oBindings.count { case (_, n, b, _, _) =>
        b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
      }
      //   Number of N :*= foo
      // + Number of N :*=* foo :*= bar
      val iStars = iBindings.count { case (_, n, b, _, _) =>
        b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
      }
      //   1         for foo := N
      // + bar.iStar for bar :*= foo :*=* N
      // + foo.iStar for foo :*= N
      // + 0         for foo :=* N
      val oKnown = oBindings.map { case (_, n, b, _, _) =>
        b match {
          case BIND_ONCE  => 1
          case BIND_FLEX  => edgeAritySelect(n, 0, n.iStar)
          case BIND_QUERY => n.iStar
          case BIND_STAR  => 0
        }
      }.sum
      //   1         for N := foo
      // + bar.oStar for N :*=* foo :=* bar
      // + foo.oStar for N :=* foo
      // + 0         for N :*= foo
      val iKnown = iBindings.map { case (_, n, b, _, _) =>
        b match {
          case BIND_ONCE  => 1
          case BIND_FLEX  => edgeAritySelect(n, n.oStar, 0)
          case BIND_QUERY => n.oStar
          case BIND_STAR  => 0
        }
      }.sum
      // Resolve star depends on the node subclass to implement the algorithm for this.
      val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
      // Cumulative list of resolved outward binding range starting points
      val oSum = oBindings.map { case (_, n, b, _, _) =>
        b match {
          case BIND_ONCE  => 1
          case BIND_FLEX  => edgeAritySelect(n, oStar, n.iStar)
          case BIND_QUERY => n.iStar
          case BIND_STAR  => oStar
        }
      }.scanLeft(0)(_ + _)
      // Cumulative list of resolved inward binding range starting points
      val iSum = iBindings.map { case (_, n, b, _, _) =>
        b match {
          case BIND_ONCE  => 1
          case BIND_FLEX  => edgeAritySelect(n, n.oStar, iStar)
          case BIND_QUERY => n.oStar
          case BIND_STAR  => iStar
        }
      }.scanLeft(0)(_ + _)
      // Create ranges for each binding based on the running sums and return
      // those along with resolved values for the star operations.
      (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
    } catch {
      // Prepend this node's context so the reported loop reads in graph order.
      case c: StarCycleException => throw c.copy(loop = context +: c.loop)
    }
  }

  /** Sequence of inward ports.
    *
    * This should be called after all star bindings are resolved.
    *
    * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
    * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
    * connection was made in the source code.
    */
  protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
    oBindings.flatMap { case (i, n, _, p, s) =>
      // for each binding operator in this node, look at what it connects to
      val (start, end) = n.iPortMapping(i)
      (start until end).map { j => (j, n, p, s) }
    }

  /** Sequence of outward ports.
    *
    * This should be called after all star bindings are resolved.
    *
    * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
    * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
    * was made in the source code.
    */
  protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
    iBindings.flatMap { case (i, n, _, p, s) =>
      // query this port index range of this node in the other side of node.
      val (start, end) = n.oPortMapping(i)
      (start until end).map { j => (j, n, p, s) }
    }

  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
  // Thus, there must exist an Eulerian path and the below algorithms terminate
  // Follow forwarding hops until we land on a port that is not forwarded again,
  // i.e. resolve through any chain of [[EphemeralNode]]s to the concrete endpoint.
  @scala.annotation.tailrec
  private def oTrace(
    tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
  ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
    case (i, n, p, s) => n.iForward(i) match {
      case None          => (i, n, p, s)
      case Some((j, m)) => oTrace((j, m, p, s))
    }
  }

  @scala.annotation.tailrec
  private def iTrace(
    tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
  ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
    case (i, n, p, s) => n.oForward(i) match {
      case None          => (i, n, p, s)
      case Some((j, m)) => iTrace((j, m, p, s))
    }
  }

  /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
    *
    * Each Port is a tuple of:
    *   - Numeric index of this binding in the [[InwardNode]] on the other end.
    *   - [[InwardNode]] on the other end of this binding.
    *   - A view of [[Parameters]] where the binding occurred.
    *   - [[SourceInfo]] for source-level error reporting.
    */
  lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)

  /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
    *
    * Each Port is a tuple of:
    *   - numeric index of this binding in [[OutwardNode]] on the other end.
    *   - [[OutwardNode]] on the other end of this binding.
    *   - a view of [[Parameters]] where the binding occurred.
    *   - [[SourceInfo]] for source-level error reporting.
    */
  lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)

  // One-shot latch, same idea as starCycleGuard: detects a cycle in the
  // downward parameter-propagation recursion.
  private var oParamsCycleGuard = false

  /** Downward-flowing inward parameters: one per inward port, pulled from the driving node. */
  protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }

  /** Downward-flowing outward parameters, produced by [[mapParamsD]] and mixed with this node via `outer.mixO`. */
  protected[diplomacy] lazy val doParams: Seq[DO] = {
    try {
      if (oParamsCycleGuard) throw DownwardCycleException()
      oParamsCycleGuard = true
      val o = mapParamsD(oPorts.size, diParams)
      require(
        o.size == oPorts.size,
        s"""Diplomacy has detected a problem with your graph:
           |At the following node, the number of outward ports should equal the number of produced outward parameters.
           |$context
           |$connectedPortsInfo
           |Downstreamed inward parameters: [${diParams.mkString(",")}]
           |Produced outward parameters: [${o.mkString(",")}]
           |""".stripMargin
      )
      o.map(outer.mixO(_, this))
    } catch {
      case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
    }
  }

  // One-shot latch detecting a cycle in the upward parameter-propagation recursion.
  private var iParamsCycleGuard = false

  /** Upward-flowing outward parameters: one per outward port, pulled from the receiving node. */
  protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }

  /** Upward-flowing inward parameters, produced by [[mapParamsU]] and mixed with this node via `inner.mixI`. */
  protected[diplomacy] lazy val uiParams: Seq[UI] = {
    try {
      if (iParamsCycleGuard) throw UpwardCycleException()
      iParamsCycleGuard = true
      val i = mapParamsU(iPorts.size, uoParams)
      require(
        i.size == iPorts.size,
        s"""Diplomacy has detected a problem with your graph:
           |At the following node, the number of inward ports should equal the number of produced inward parameters.
           |$context
           |$connectedPortsInfo
           |Upstreamed outward parameters: [${uoParams.mkString(",")}]
           |Produced inward parameters: [${i.mkString(",")}]
           |""".stripMargin
      )
      i.map(inner.mixI(_, this))
    } catch {
      case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
    }
  }

  /** Outward edge parameters. */
  protected[diplomacy] lazy val edgesOut: Seq[EO] =
    (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }

  /** Inward edge parameters. */
  protected[diplomacy] lazy val edgesIn: Seq[EI] =
    (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }

  /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
    *
    * If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
    */
  lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)

  /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
  protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
    val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
    //       In the future, we should add an option to decide whether allowing unconnected in the LazyModule
    x := DontCare
    x
  }

  /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
  protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
    val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
    //       In the future, we should add an option to decide whether allowing unconnected in the LazyModule
    x := DontCare
    x
  }

  // Dangles without hardware data attached; used both for real instantiation
  // (data filled in by danglesOut/danglesIn) and for cloneDangles().
  private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
    Dangle(
      source = HalfEdge(serial, i),
      sink = HalfEdge(n.serial, j),
      flipped = false,
      name = wirePrefix + "out",
      dataOpt = None
    )
  }

  private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
    Dangle(
      source = HalfEdge(n.serial, j),
      sink = HalfEdge(serial, i),
      flipped = true,
      name = wirePrefix + "in",
      dataOpt = None
    )
  }

  /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
  protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
    d.copy(dataOpt = Some(bundleOut(i)))
  }

  /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
  protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
    d.copy(dataOpt = Some(bundleIn(i)))
  }

  // Set true once instantiate() has run; guards the in/out accessors below.
  private[diplomacy] var instantiated = false

  /** Gather Bundle and edge parameters of outward ports.
    *
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
    * [[LazyModuleImp]] code or after its instantiation has completed.
    */
  def out: Seq[(BO, EO)] = {
    require(
      instantiated,
      s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
    )
    bundleOut.zip(edgesOut)
  }

  /** Gather Bundle and edge parameters of inward ports.
    *
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
    * [[LazyModuleImp]] code or after its instantiation has completed.
    */
  def in: Seq[(BI, EI)] = {
    require(
      instantiated,
      s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
    )
    bundleIn.zip(edgesIn)
  }

  /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
    * instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
    */
  protected[diplomacy] def instantiate(): Seq[Dangle] = {
    instantiated = true
    if (!circuitIdentity) {
      (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
    }
    danglesOut ++ danglesIn
  }

  /** Dangles for a cloned node: same graph topology, but no hardware attached. */
  protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn

  /** Connects the outward part of a node with the inward part of this node. */
  protected[diplomacy] def bind(
    h:       OutwardNode[DI, UI, BI],
    binding: NodeBinding
  )(
    implicit p: Parameters,
    sourceInfo: SourceInfo
  ): Unit = {
    val x = this // x := y
    val y = h
    sourceLine(sourceInfo, " at ", "")
    val i = x.iPushed
    val o = y.oPushed
    y.oPush(
      i,
      x,
      // The binding recorded on the far side is mirrored: a STAR on our side is
      // a QUERY from the other node's perspective, and vice versa.
      binding match {
        case BIND_ONCE  => BIND_ONCE
        case BIND_FLEX  => BIND_FLEX
        case BIND_STAR  => BIND_QUERY
        case BIND_QUERY => BIND_STAR
      }
    )
    x.iPush(o, y, binding)
  }

  /* Metadata for printing the node graph. */
  def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
    val re = inner.render(e)
    // Honor per-connection RenderFlipped requests by XOR-ing with the edge's own flip.
    (n, re.copy(flipped = re.flipped != p(RenderFlipped)))
  }

  /** Metadata for printing the node graph */
  def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module TLBuffer_a32d64s3k3z4c_1( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset, // @[Buffer.scala:40:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_b_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_b_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_b_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_b_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_b_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_in_b_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_in_b_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_b_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_b_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_in_c_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_c_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_c_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_c_bits_size, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_c_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_c_bits_address, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_c_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_in_e_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_e_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_e_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_b_valid, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_b_bits_param, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_out_b_bits_address, // @[LazyModuleImp.scala:107:25]
input auto_out_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_e_bits_sink // @[LazyModuleImp.scala:107:25]
);
wire auto_in_a_valid_0 = auto_in_a_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [3:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [31:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire auto_in_b_ready_0 = auto_in_b_ready; // @[Buffer.scala:40:9]
wire auto_in_c_valid_0 = auto_in_c_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_in_c_bits_opcode_0 = auto_in_c_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_in_c_bits_param_0 = auto_in_c_bits_param; // @[Buffer.scala:40:9]
wire [3:0] auto_in_c_bits_size_0 = auto_in_c_bits_size; // @[Buffer.scala:40:9]
wire [2:0] auto_in_c_bits_source_0 = auto_in_c_bits_source; // @[Buffer.scala:40:9]
wire [31:0] auto_in_c_bits_address_0 = auto_in_c_bits_address; // @[Buffer.scala:40:9]
wire [63:0] auto_in_c_bits_data_0 = auto_in_c_bits_data; // @[Buffer.scala:40:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[Buffer.scala:40:9]
wire auto_in_e_valid_0 = auto_in_e_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_in_e_bits_sink_0 = auto_in_e_bits_sink; // @[Buffer.scala:40:9]
wire auto_out_a_ready_0 = auto_out_a_ready; // @[Buffer.scala:40:9]
wire auto_out_b_valid_0 = auto_out_b_valid; // @[Buffer.scala:40:9]
wire [1:0] auto_out_b_bits_param_0 = auto_out_b_bits_param; // @[Buffer.scala:40:9]
wire [31:0] auto_out_b_bits_address_0 = auto_out_b_bits_address; // @[Buffer.scala:40:9]
wire auto_out_c_ready_0 = auto_out_c_ready; // @[Buffer.scala:40:9]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire [1:0] auto_out_d_bits_param_0 = auto_out_d_bits_param; // @[Buffer.scala:40:9]
wire [3:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_sink_0 = auto_out_d_bits_sink; // @[Buffer.scala:40:9]
wire auto_out_d_bits_denied_0 = auto_out_d_bits_denied; // @[Buffer.scala:40:9]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire auto_out_d_bits_corrupt_0 = auto_out_d_bits_corrupt; // @[Buffer.scala:40:9]
wire auto_out_e_ready = 1'h1; // @[Decoupled.scala:362:21]
wire nodeOut_e_ready = 1'h1; // @[Decoupled.scala:362:21]
wire [63:0] auto_out_b_bits_data = 64'h0; // @[Decoupled.scala:362:21]
wire [63:0] nodeOut_b_bits_data = 64'h0; // @[Decoupled.scala:362:21]
wire [7:0] auto_out_b_bits_mask = 8'hFF; // @[Decoupled.scala:362:21]
wire [7:0] nodeOut_b_bits_mask = 8'hFF; // @[Decoupled.scala:362:21]
wire [2:0] auto_out_b_bits_source = 3'h0; // @[Decoupled.scala:362:21]
wire [2:0] nodeOut_b_bits_source = 3'h0; // @[Decoupled.scala:362:21]
wire [3:0] auto_out_b_bits_size = 4'h6; // @[Decoupled.scala:362:21]
wire [3:0] nodeOut_b_bits_size = 4'h6; // @[Decoupled.scala:362:21]
wire [2:0] auto_out_b_bits_opcode = 3'h6; // @[Decoupled.scala:362:21]
wire [2:0] nodeOut_b_bits_opcode = 3'h6; // @[Decoupled.scala:362:21]
wire auto_in_a_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire auto_in_c_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire auto_out_b_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire nodeIn_a_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire nodeIn_c_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire nodeOut_b_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[Buffer.scala:40:9]
wire nodeIn_b_ready = auto_in_b_ready_0; // @[Buffer.scala:40:9]
wire nodeIn_b_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_b_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_b_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_b_bits_size; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_b_bits_source; // @[MixedNode.scala:551:17]
wire [31:0] nodeIn_b_bits_address; // @[MixedNode.scala:551:17]
wire [7:0] nodeIn_b_bits_mask; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_b_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_b_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeIn_c_ready; // @[MixedNode.scala:551:17]
wire nodeIn_c_valid = auto_in_c_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_c_bits_opcode = auto_in_c_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_c_bits_param = auto_in_c_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] nodeIn_c_bits_size = auto_in_c_bits_size_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_c_bits_source = auto_in_c_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] nodeIn_c_bits_address = auto_in_c_bits_address_0; // @[Buffer.scala:40:9]
wire [63:0] nodeIn_c_bits_data = auto_in_c_bits_data_0; // @[Buffer.scala:40:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[Buffer.scala:40:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeIn_e_ready; // @[MixedNode.scala:551:17]
wire nodeIn_e_valid = auto_in_e_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_e_bits_sink = auto_in_e_bits_sink_0; // @[Buffer.scala:40:9]
wire nodeOut_a_ready = auto_out_a_ready_0; // @[Buffer.scala:40:9]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_b_ready; // @[MixedNode.scala:542:17]
wire nodeOut_b_valid = auto_out_b_valid_0; // @[Buffer.scala:40:9]
wire [1:0] nodeOut_b_bits_param = auto_out_b_bits_param_0; // @[Buffer.scala:40:9]
wire [31:0] nodeOut_b_bits_address = auto_out_b_bits_address_0; // @[Buffer.scala:40:9]
wire nodeOut_c_ready = auto_out_c_ready_0; // @[Buffer.scala:40:9]
wire nodeOut_c_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_c_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_c_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_c_bits_size; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_c_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_c_bits_address; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_c_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_c_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] nodeOut_d_bits_param = auto_out_d_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_sink = auto_out_d_bits_sink_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_denied = auto_out_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_corrupt = auto_out_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire nodeOut_e_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_e_bits_sink; // @[MixedNode.scala:542:17]
wire auto_in_a_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_b_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] auto_in_b_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] auto_in_b_bits_size_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_b_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] auto_in_b_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] auto_in_b_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] auto_in_b_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_b_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_b_valid_0; // @[Buffer.scala:40:9]
wire auto_in_c_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_d_valid_0; // @[Buffer.scala:40:9]
wire auto_in_e_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_out_a_valid_0; // @[Buffer.scala:40:9]
wire auto_out_b_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_c_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_c_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] auto_out_c_bits_size_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_c_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] auto_out_c_bits_address_0; // @[Buffer.scala:40:9]
wire [63:0] auto_out_c_bits_data_0; // @[Buffer.scala:40:9]
wire auto_out_c_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_out_c_valid_0; // @[Buffer.scala:40:9]
wire auto_out_d_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_e_bits_sink_0; // @[Buffer.scala:40:9]
wire auto_out_e_valid_0; // @[Buffer.scala:40:9]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[Buffer.scala:40:9]
assign auto_in_b_valid_0 = nodeIn_b_valid; // @[Buffer.scala:40:9]
assign auto_in_b_bits_opcode_0 = nodeIn_b_bits_opcode; // @[Buffer.scala:40:9]
assign auto_in_b_bits_param_0 = nodeIn_b_bits_param; // @[Buffer.scala:40:9]
assign auto_in_b_bits_size_0 = nodeIn_b_bits_size; // @[Buffer.scala:40:9]
assign auto_in_b_bits_source_0 = nodeIn_b_bits_source; // @[Buffer.scala:40:9]
assign auto_in_b_bits_address_0 = nodeIn_b_bits_address; // @[Buffer.scala:40:9]
assign auto_in_b_bits_mask_0 = nodeIn_b_bits_mask; // @[Buffer.scala:40:9]
assign auto_in_b_bits_data_0 = nodeIn_b_bits_data; // @[Buffer.scala:40:9]
assign auto_in_b_bits_corrupt_0 = nodeIn_b_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_in_c_ready_0 = nodeIn_c_ready; // @[Buffer.scala:40:9]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_in_e_ready_0 = nodeIn_e_ready; // @[Buffer.scala:40:9]
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_b_ready_0 = nodeOut_b_ready; // @[Buffer.scala:40:9]
assign auto_out_c_valid_0 = nodeOut_c_valid; // @[Buffer.scala:40:9]
assign auto_out_c_bits_opcode_0 = nodeOut_c_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_c_bits_param_0 = nodeOut_c_bits_param; // @[Buffer.scala:40:9]
assign auto_out_c_bits_size_0 = nodeOut_c_bits_size; // @[Buffer.scala:40:9]
assign auto_out_c_bits_source_0 = nodeOut_c_bits_source; // @[Buffer.scala:40:9]
assign auto_out_c_bits_address_0 = nodeOut_c_bits_address; // @[Buffer.scala:40:9]
assign auto_out_c_bits_data_0 = nodeOut_c_bits_data; // @[Buffer.scala:40:9]
assign auto_out_c_bits_corrupt_0 = nodeOut_c_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[Buffer.scala:40:9]
assign auto_out_e_valid_0 = nodeOut_e_valid; // @[Buffer.scala:40:9]
assign auto_out_e_bits_sink_0 = nodeOut_e_bits_sink; // @[Buffer.scala:40:9]
TLMonitor_46 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_b_ready (nodeIn_b_ready), // @[MixedNode.scala:551:17]
.io_in_b_valid (nodeIn_b_valid), // @[MixedNode.scala:551:17]
.io_in_b_bits_opcode (nodeIn_b_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_b_bits_param (nodeIn_b_bits_param), // @[MixedNode.scala:551:17]
.io_in_b_bits_size (nodeIn_b_bits_size), // @[MixedNode.scala:551:17]
.io_in_b_bits_source (nodeIn_b_bits_source), // @[MixedNode.scala:551:17]
.io_in_b_bits_address (nodeIn_b_bits_address), // @[MixedNode.scala:551:17]
.io_in_b_bits_mask (nodeIn_b_bits_mask), // @[MixedNode.scala:551:17]
.io_in_b_bits_data (nodeIn_b_bits_data), // @[MixedNode.scala:551:17]
.io_in_b_bits_corrupt (nodeIn_b_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_c_ready (nodeIn_c_ready), // @[MixedNode.scala:551:17]
.io_in_c_valid (nodeIn_c_valid), // @[MixedNode.scala:551:17]
.io_in_c_bits_opcode (nodeIn_c_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_c_bits_param (nodeIn_c_bits_param), // @[MixedNode.scala:551:17]
.io_in_c_bits_size (nodeIn_c_bits_size), // @[MixedNode.scala:551:17]
.io_in_c_bits_source (nodeIn_c_bits_source), // @[MixedNode.scala:551:17]
.io_in_c_bits_address (nodeIn_c_bits_address), // @[MixedNode.scala:551:17]
.io_in_c_bits_data (nodeIn_c_bits_data), // @[MixedNode.scala:551:17]
.io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_param (nodeIn_d_bits_param), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_sink (nodeIn_d_bits_sink), // @[MixedNode.scala:551:17]
.io_in_d_bits_denied (nodeIn_d_bits_denied), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (nodeIn_d_bits_data), // @[MixedNode.scala:551:17]
.io_in_d_bits_corrupt (nodeIn_d_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_e_ready (nodeIn_e_ready), // @[MixedNode.scala:551:17]
.io_in_e_valid (nodeIn_e_valid), // @[MixedNode.scala:551:17]
.io_in_e_bits_sink (nodeIn_e_bits_sink) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
Queue2_TLBundleA_a32d64s3k3z4c nodeOut_a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeIn_a_ready),
.io_enq_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_enq_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_enq_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_enq_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_enq_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_enq_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_enq_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_enq_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_deq_ready (nodeOut_a_ready), // @[MixedNode.scala:542:17]
.io_deq_valid (nodeOut_a_valid),
.io_deq_bits_opcode (nodeOut_a_bits_opcode),
.io_deq_bits_param (nodeOut_a_bits_param),
.io_deq_bits_size (nodeOut_a_bits_size),
.io_deq_bits_source (nodeOut_a_bits_source),
.io_deq_bits_address (nodeOut_a_bits_address),
.io_deq_bits_mask (nodeOut_a_bits_mask),
.io_deq_bits_data (nodeOut_a_bits_data),
.io_deq_bits_corrupt (nodeOut_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue2_TLBundleD_a32d64s3k3z4c nodeIn_d_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeOut_d_ready),
.io_enq_valid (nodeOut_d_valid), // @[MixedNode.scala:542:17]
.io_enq_bits_opcode (nodeOut_d_bits_opcode), // @[MixedNode.scala:542:17]
.io_enq_bits_param (nodeOut_d_bits_param), // @[MixedNode.scala:542:17]
.io_enq_bits_size (nodeOut_d_bits_size), // @[MixedNode.scala:542:17]
.io_enq_bits_source (nodeOut_d_bits_source), // @[MixedNode.scala:542:17]
.io_enq_bits_sink (nodeOut_d_bits_sink), // @[MixedNode.scala:542:17]
.io_enq_bits_denied (nodeOut_d_bits_denied), // @[MixedNode.scala:542:17]
.io_enq_bits_data (nodeOut_d_bits_data), // @[MixedNode.scala:542:17]
.io_enq_bits_corrupt (nodeOut_d_bits_corrupt), // @[MixedNode.scala:542:17]
.io_deq_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_deq_valid (nodeIn_d_valid),
.io_deq_bits_opcode (nodeIn_d_bits_opcode),
.io_deq_bits_param (nodeIn_d_bits_param),
.io_deq_bits_size (nodeIn_d_bits_size),
.io_deq_bits_source (nodeIn_d_bits_source),
.io_deq_bits_sink (nodeIn_d_bits_sink),
.io_deq_bits_denied (nodeIn_d_bits_denied),
.io_deq_bits_data (nodeIn_d_bits_data),
.io_deq_bits_corrupt (nodeIn_d_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue2_TLBundleB_a32d64s3k3z4c nodeIn_b_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeOut_b_ready),
.io_enq_valid (nodeOut_b_valid), // @[MixedNode.scala:542:17]
.io_enq_bits_param (nodeOut_b_bits_param), // @[MixedNode.scala:542:17]
.io_enq_bits_address (nodeOut_b_bits_address), // @[MixedNode.scala:542:17]
.io_deq_ready (nodeIn_b_ready), // @[MixedNode.scala:551:17]
.io_deq_valid (nodeIn_b_valid),
.io_deq_bits_opcode (nodeIn_b_bits_opcode),
.io_deq_bits_param (nodeIn_b_bits_param),
.io_deq_bits_size (nodeIn_b_bits_size),
.io_deq_bits_source (nodeIn_b_bits_source),
.io_deq_bits_address (nodeIn_b_bits_address),
.io_deq_bits_mask (nodeIn_b_bits_mask),
.io_deq_bits_data (nodeIn_b_bits_data),
.io_deq_bits_corrupt (nodeIn_b_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue2_TLBundleC_a32d64s3k3z4c nodeOut_c_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeIn_c_ready),
.io_enq_valid (nodeIn_c_valid), // @[MixedNode.scala:551:17]
.io_enq_bits_opcode (nodeIn_c_bits_opcode), // @[MixedNode.scala:551:17]
.io_enq_bits_param (nodeIn_c_bits_param), // @[MixedNode.scala:551:17]
.io_enq_bits_size (nodeIn_c_bits_size), // @[MixedNode.scala:551:17]
.io_enq_bits_source (nodeIn_c_bits_source), // @[MixedNode.scala:551:17]
.io_enq_bits_address (nodeIn_c_bits_address), // @[MixedNode.scala:551:17]
.io_enq_bits_data (nodeIn_c_bits_data), // @[MixedNode.scala:551:17]
.io_deq_ready (nodeOut_c_ready), // @[MixedNode.scala:542:17]
.io_deq_valid (nodeOut_c_valid),
.io_deq_bits_opcode (nodeOut_c_bits_opcode),
.io_deq_bits_param (nodeOut_c_bits_param),
.io_deq_bits_size (nodeOut_c_bits_size),
.io_deq_bits_source (nodeOut_c_bits_source),
.io_deq_bits_address (nodeOut_c_bits_address),
.io_deq_bits_data (nodeOut_c_bits_data),
.io_deq_bits_corrupt (nodeOut_c_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue2_TLBundleE_a32d64s3k3z4c nodeOut_e_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeIn_e_ready),
.io_enq_valid (nodeIn_e_valid), // @[MixedNode.scala:551:17]
.io_enq_bits_sink (nodeIn_e_bits_sink), // @[MixedNode.scala:551:17]
.io_deq_valid (nodeOut_e_valid),
.io_deq_bits_sink (nodeOut_e_bits_sink)
); // @[Decoupled.scala:362:21]
assign auto_in_a_ready = auto_in_a_ready_0; // @[Buffer.scala:40:9]
assign auto_in_b_valid = auto_in_b_valid_0; // @[Buffer.scala:40:9]
assign auto_in_b_bits_opcode = auto_in_b_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_in_b_bits_param = auto_in_b_bits_param_0; // @[Buffer.scala:40:9]
assign auto_in_b_bits_size = auto_in_b_bits_size_0; // @[Buffer.scala:40:9]
assign auto_in_b_bits_source = auto_in_b_bits_source_0; // @[Buffer.scala:40:9]
assign auto_in_b_bits_address = auto_in_b_bits_address_0; // @[Buffer.scala:40:9]
assign auto_in_b_bits_mask = auto_in_b_bits_mask_0; // @[Buffer.scala:40:9]
assign auto_in_b_bits_data = auto_in_b_bits_data_0; // @[Buffer.scala:40:9]
assign auto_in_b_bits_corrupt = auto_in_b_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_in_c_ready = auto_in_c_ready_0; // @[Buffer.scala:40:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_in_e_ready = auto_in_e_ready_0; // @[Buffer.scala:40:9]
assign auto_out_a_valid = auto_out_a_valid_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode = auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param = auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size = auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source = auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address = auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask = auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data = auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt = auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_b_ready = auto_out_b_ready_0; // @[Buffer.scala:40:9]
assign auto_out_c_valid = auto_out_c_valid_0; // @[Buffer.scala:40:9]
assign auto_out_c_bits_opcode = auto_out_c_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_out_c_bits_param = auto_out_c_bits_param_0; // @[Buffer.scala:40:9]
assign auto_out_c_bits_size = auto_out_c_bits_size_0; // @[Buffer.scala:40:9]
assign auto_out_c_bits_source = auto_out_c_bits_source_0; // @[Buffer.scala:40:9]
assign auto_out_c_bits_address = auto_out_c_bits_address_0; // @[Buffer.scala:40:9]
assign auto_out_c_bits_data = auto_out_c_bits_data_0; // @[Buffer.scala:40:9]
assign auto_out_c_bits_corrupt = auto_out_c_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_d_ready = auto_out_d_ready_0; // @[Buffer.scala:40:9]
assign auto_out_e_valid = auto_out_e_valid_0; // @[Buffer.scala:40:9]
assign auto_out_e_bits_sink = auto_out_e_bits_sink_0; // @[Buffer.scala:40:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
// 1-bit-wide (w1), 3-deep (d3) clock-domain-crossing synchronizer with
// asynchronous reset to 0 (i0); instance copy #13.
// Generated from the Chisel class `AsyncResetSynchronizerShiftReg` (SynchronizerReg.scala):
// it wraps a single `SynchronizerPrimitiveShiftReg` chain built with
// `SynchronizerResetType.Async` under `withReset(reset.asAsyncReset)`, i.e. three
// registers initialized to 0 that sample io_d and drive io_q from the chain head.
module AsyncResetSynchronizerShiftReg_w1_d3_i0_13( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
// Pass-through aliases emitted by the Chisel/FIRRTL compiler (no logic).
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
// Reset forwarded unchanged; the primitive applies it asynchronously
// (`reset.asAsyncReset` in the Chisel source).
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
// Three-flop synchronizer chain; holds 0 while reset is asserted and
// presents the 3-cycle-delayed, metastability-filtered io_d on io_q.
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_13 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
File WidthWidget.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.AddressSet
import freechips.rocketchip.util.{Repeater, UIntToOH1}
// innBeatBytes => the new client-facing bus width
/** Adapter that converts between TileLink buses of different beat widths.
  *
  * The client-facing (inner) side is forced to `innerBeatBytes`; the manager-facing side
  * keeps its negotiated width. Narrow-to-wide traffic is merged by accumulating beats in
  * registers; wide-to-narrow traffic is split by repeating the input beat and muxing out
  * sub-beats. Fix in this revision: corrected the spelling of the `require` failure
  * message in `split` ("Impossbile" -> "Impossible").
  *
  * @param innerBeatBytes the new client-facing bus width, in bytes
  */
class TLWidthWidget(innerBeatBytes: Int)(implicit p: Parameters) extends LazyModule
{
  // The widget is a circuit identity (and can be optimized away) when every outward
  // manager port already has the requested beat width.
  private def noChangeRequired(manager: TLManagerPortParameters) = manager.beatBytes == innerBeatBytes
  val node = new TLAdapterNode(
    clientFn = { case c => c },
    managerFn = { case m => m.v1copy(beatBytes = innerBeatBytes) }){
    override def circuitIdentity = edges.out.map(_.manager).forall(noChangeRequired)
  }
  override lazy val desiredName = s"TLWidthWidget$innerBeatBytes"
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) {
    /** Accumulate `ratio` narrow input beats into one wide output beat.
      * Only fires the output on the last input beat (or immediately for data-less messages).
      */
    def merge[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T]) = {
      val inBytes = edgeIn.manager.beatBytes
      val outBytes = edgeOut.manager.beatBytes
      val ratio = outBytes / inBytes
      val keepBits = log2Ceil(outBytes)
      val dropBits = log2Ceil(inBytes)
      val countBits = log2Ceil(ratio)
      val size = edgeIn.size(in.bits)
      val hasData = edgeIn.hasData(in.bits)
      // Number of input beats (minus one) in this transfer, as a mask
      val limit = UIntToOH1(size, keepBits) >> dropBits
      val count = RegInit(0.U(countBits.W))
      val first = count === 0.U
      val last = count === limit || !hasData
      // enable(i) selects which wide-beat lane the current input beat lands in
      val enable = Seq.tabulate(ratio) { i => !((count ^ i.U) & limit).orR }
      // A corrupt input beat poisons the entire merged output beat
      val corrupt_reg = RegInit(false.B)
      val corrupt_in = edgeIn.corrupt(in.bits)
      val corrupt_out = corrupt_in || corrupt_reg
      when (in.fire) {
        count := count + 1.U
        corrupt_reg := corrupt_out
        when (last) {
          count := 0.U
          corrupt_reg := false.B
        }
      }
      def helper(idata: UInt): UInt = {
        // rdata is X until the first time a multi-beat write occurs.
        // Prevent the X from leaking outside by jamming the mux control until
        // the first time rdata is written (and hence no longer X).
        val rdata_written_once = RegInit(false.B)
        val masked_enable = enable.map(_ || !rdata_written_once)
        val odata = Seq.fill(ratio) { WireInit(idata) }
        val rdata = Reg(Vec(ratio-1, chiselTypeOf(idata)))
        val pdata = rdata :+ idata
        val mdata = (masked_enable zip (odata zip pdata)) map { case (e, (o, p)) => Mux(e, o, p) }
        when (in.fire && !last) {
          rdata_written_once := true.B
          (rdata zip mdata) foreach { case (r, m) => r := m }
        }
        Cat(mdata.reverse)
      }
      in.ready := out.ready || !last
      out.valid := in.valid && last
      out.bits := in.bits
      // Don't put down hardware if we never carry data
      edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits)))
      edgeOut.corrupt(out.bits) := corrupt_out
      (out.bits, in.bits) match {
        case (o: TLBundleA, i: TLBundleA) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
        case (o: TLBundleB, i: TLBundleB) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
        case (o: TLBundleC, i: TLBundleC) => ()
        case (o: TLBundleD, i: TLBundleD) => ()
        case _ => require(false, "Impossible bundle combination in WidthWidget")
      }
    }
    /** Split one wide input beat into `ratio` narrow output beats.
      * Returns a Bool that is true while the input beat must be repeated.
      * `sourceMap` recovers the low address bits for D-channel (address-less) messages.
      */
    def split[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
      val inBytes = edgeIn.manager.beatBytes
      val outBytes = edgeOut.manager.beatBytes
      val ratio = inBytes / outBytes
      val keepBits = log2Ceil(inBytes)
      val dropBits = log2Ceil(outBytes)
      val countBits = log2Ceil(ratio)
      val size = edgeIn.size(in.bits)
      val hasData = edgeIn.hasData(in.bits)
      val limit = UIntToOH1(size, keepBits) >> dropBits
      val count = RegInit(0.U(countBits.W))
      val first = count === 0.U
      val last = count === limit || !hasData
      when (out.fire) {
        count := count + 1.U
        when (last) { count := 0.U }
      }
      // For sub-beat transfer, extract which part matters
      val sel = in.bits match {
        case a: TLBundleA => a.address(keepBits-1, dropBits)
        case b: TLBundleB => b.address(keepBits-1, dropBits)
        case c: TLBundleC => c.address(keepBits-1, dropBits)
        case d: TLBundleD => {
          val sel = sourceMap(d.source)
          val hold = Mux(first, sel, RegEnable(sel, first)) // a_first is not for whole xfer
          hold & ~limit // if more than one a_first/xfer, the address must be aligned anyway
        }
      }
      val index = sel | count
      // Mux out the `index`-th narrow slice of the wide input payload
      def helper(idata: UInt, width: Int): UInt = {
        val mux = VecInit.tabulate(ratio) { i => idata((i+1)*outBytes*width-1, i*outBytes*width) }
        mux(index)
      }
      out.bits := in.bits
      out.valid := in.valid
      in.ready := out.ready
      // Don't put down hardware if we never carry data
      edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits), 8))
      (out.bits, in.bits) match {
        case (o: TLBundleA, i: TLBundleA) => o.mask := helper(i.mask, 1)
        case (o: TLBundleB, i: TLBundleB) => o.mask := helper(i.mask, 1)
        case (o: TLBundleC, i: TLBundleC) => () // replicating corrupt to all beats is ok
        case (o: TLBundleD, i: TLBundleD) => ()
        case _ => require(false, "Impossible bundle combination in WidthWidget")
      }
      // Repeat the input if we're not last
      !last
    }
    /** Wire one channel across the width boundary, choosing pass-through, split, or merge
      * based on the relative beat widths of the two edges.
      */
    def splice[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
      if (edgeIn.manager.beatBytes == edgeOut.manager.beatBytes) {
        // nothing to do; pass it through
        out.bits := in.bits
        out.valid := in.valid
        in.ready := out.ready
      } else if (edgeIn.manager.beatBytes > edgeOut.manager.beatBytes) {
        // split input to output
        val repeat = Wire(Bool())
        val repeated = Repeater(in, repeat)
        val cated = Wire(chiselTypeOf(repeated))
        cated <> repeated
        edgeIn.data(cated.bits) := Cat(
          edgeIn.data(repeated.bits)(edgeIn.manager.beatBytes*8-1, edgeOut.manager.beatBytes*8),
          edgeIn.data(in.bits)(edgeOut.manager.beatBytes*8-1, 0))
        repeat := split(edgeIn, cated, edgeOut, out, sourceMap)
      } else {
        // merge input to output
        merge(edgeIn, in, edgeOut, out)
      }
    }
    (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
      // If the master is narrower than the slave, the D channel must be narrowed.
      // This is tricky, because the D channel has no address data.
      // Thus, you don't know which part of a sub-beat transfer to extract.
      // To fix this, we record the relevant address bits for all sources.
      // The assumption is that this sort of situation happens only where
      // you connect a narrow master to the system bus, so there are few sources.
      def sourceMap(source_bits: UInt) = {
        val source = if (edgeIn.client.endSourceId == 1) 0.U(0.W) else source_bits
        require (edgeOut.manager.beatBytes > edgeIn.manager.beatBytes)
        val keepBits = log2Ceil(edgeOut.manager.beatBytes)
        val dropBits = log2Ceil(edgeIn.manager.beatBytes)
        val sources = Reg(Vec(edgeIn.client.endSourceId, UInt((keepBits-dropBits).W)))
        val a_sel = in.a.bits.address(keepBits-1, dropBits)
        when (in.a.fire) {
          if (edgeIn.client.endSourceId == 1) { // avoid extraction-index-width warning
            sources(0) := a_sel
          } else {
            sources(in.a.bits.source) := a_sel
          }
        }
        // depopulate unused source registers:
        edgeIn.client.unusedSources.foreach { id => sources(id) := 0.U }
        // Bypass the register when the A beat for this source is still in flight this cycle
        val bypass = in.a.valid && in.a.bits.source === source
        if (edgeIn.manager.minLatency > 0) sources(source)
        else Mux(bypass, a_sel, sources(source))
      }
      splice(edgeIn, in.a, edgeOut, out.a, sourceMap)
      splice(edgeOut, out.d, edgeIn, in.d, sourceMap)
      if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) {
        // TL-C: carry B/C across the width boundary; E is width-independent
        splice(edgeOut, out.b, edgeIn, in.b, sourceMap)
        splice(edgeIn, in.c, edgeOut, out.c, sourceMap)
        out.e.valid := in.e.valid
        out.e.bits := in.e.bits
        in.e.ready := out.e.ready
      } else {
        // TL-UL/UH only: tie off the cache-coherence channels
        in.b.valid := false.B
        in.c.ready := true.B
        in.e.ready := true.B
        out.b.ready := true.B
        out.c.valid := false.B
        out.e.valid := false.B
      }
    }
  }
}
/** Factory helpers for [[TLWidthWidget]]. */
object TLWidthWidget
{
  /** Instantiate a width widget converting to `innerBeatBytes` and return its diplomatic node. */
  def apply(innerBeatBytes: Int)(implicit p: Parameters): TLNode =
  {
    LazyModule(new TLWidthWidget(innerBeatBytes)).node
  }
  /** Convenience overload: adopt the beat width of an existing bus wrapper. */
  def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper.beatBytes)
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
/** Synthesizable unit-test harness: a fuzzer driving a TLRAM through two back-to-back
  * width widgets (`first` then `second` bytes wide) plus delayers and a fragmenter.
  *
  * @param first  beat width (bytes) of the widget nearest the fuzzer
  * @param second beat width (bytes) of the widget nearest the RAM
  * @param txns   number of fuzzer transactions to run
  */
class TLRAMWidthWidget(first: Int, second: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
  val fuzz = LazyModule(new TLFuzzer(txns))
  val model = LazyModule(new TLRAMModel("WidthWidget"))
  val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff)))
  // Diplomatic graph: fuzzer -> model -> delay -> widen/narrow twice -> fragment -> delay -> RAM
  (ram.node
    := TLDelayer(0.1)
    := TLFragmenter(4, 256)
    := TLWidthWidget(second)
    := TLWidthWidget(first)
    := TLDelayer(0.1)
    := model.node
    := fuzz.node)
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) with UnitTestModule {
    // Test completes when the fuzzer has issued all transactions
    io.finished := fuzz.module.io.finished
  }
}
/** UnitTest wrapper around [[TLRAMWidthWidget]] for a little->big width conversion pair. */
class TLRAMWidthWidgetTest(little: Int, big: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
  val dut = Module(LazyModule(new TLRAMWidthWidget(little,big,txns)).module)
  dut.io.start := DontCare // the fuzzer free-runs; start is unused
  io.finished := dut.io.finished
}
File VectorScalarMultiplier.scala:
package gemmini
import chisel3._
import chisel3.util._
import Util._
/** Request to scale a row of `block_cols` elements by a single scalar.
  * NOTE: field declaration order defines the hardware bit layout — do not reorder.
  */
class VectorScalarMultiplierReq[T <: Data, U <: Data, Tag <: Data](block_cols: Int, t: T, u: U, tag_t: Tag) extends Bundle {
  val in: Vec[T] = Vec(block_cols, t.cloneType)       // row of input elements
  val scale: U = u.cloneType                          // scalar multiplicand applied to every element
  val repeats: UInt = UInt(16.W) // TODO magic number  // extra times to emit this row (0 = emit once)
  val pixel_repeats: UInt = UInt(8.W) // TODO magic number
  val last: Bool = Bool()                             // marks the final request of a transfer
  val tag: Tag = tag_t.cloneType                      // opaque metadata returned with the response
}
/** Response carrying one scaled row, its repeat index, and the request's tag. */
class VectorScalarMultiplierResp[T <: Data, Tag <: Data](block_cols: Int, t: T, tag_t: Tag) extends Bundle {
  val out: Vec[T] = Vec(block_cols, t.cloneType)  // scaled output row
  val row: UInt = UInt(16.W) // TODO magic number  // remaining-repeats counter for this row
  val last: Bool = Bool()                          // true on the final response of a transfer
  val tag: Tag = tag_t.cloneType
}
/** One element + scalar routed through a shared scale unit, with (entry id, column index)
  * bookkeeping so the result can be written back to the right slot.
  */
class DataWithIndex[T <: Data, U <: Data](t: T, u: U) extends Bundle {
  val data = t.cloneType
  val scale = u.cloneType
  val id = UInt(2.W) // TODO hardcoded — entry index within the 3-entry buffer
  val index = UInt() // column index within the row (width inferred)
}
/** Pipelined scale unit: applies `scale_func` to one element and delays the
  * (valid, payload) pair by the configured latency via `Pipe`.
  */
class ScalePipe[T <: Data, U <: Data](t: T, mvin_scale_args: ScaleArguments[T, U]) extends Module {
  val u = mvin_scale_args.multiplicand_t
  val io = IO(new Bundle {
    val in = Input(Valid(new DataWithIndex(t, u)))
    val out = Output(Valid(new DataWithIndex(t, u)))
  })
  val latency = mvin_scale_args.latency
  // Only `data` is transformed; id/index/scale pass through unchanged
  val out = WireInit(io.in)
  out.bits.data := mvin_scale_args.scale_func(io.in.bits.data, io.in.bits.scale.asTypeOf(u))
  io.out := Pipe(out, latency)
}
/** Multiplies each element of a `block_cols`-wide row by a scalar, with optional row
  * repetition. Two implementations are elaborated depending on `mvin_scale_args`:
  *   - num_scale_units == -1: fully-parallel scaling feeding a simple `Pipeline`.
  *   - otherwise: a 3-entry buffer whose elements are scaled by a limited pool of
  *     shared, arbitrated [[ScalePipe]] units.
  */
class VectorScalarMultiplier[T <: Data, U <: Data, Tag <: Data](
  mvin_scale_args: Option[ScaleArguments[T, U]], block_cols: Int, t: T, tag_t: Tag
) extends Module {
  // When no scale args are given, scaling is always the identity function
  val (u, num_scale_units, always_identity) = mvin_scale_args match {
    case Some(ScaleArguments(_, _, multiplicand_t, num_scale_units, _, _)) => (multiplicand_t, num_scale_units, false)
    case None => (Bool(), -1, true) // TODO make this a 0-width UInt
  }
  val io = IO(new Bundle {
    val req = Flipped(Decoupled(new VectorScalarMultiplierReq(block_cols, t, u, tag_t)))
    val resp = Decoupled(new VectorScalarMultiplierResp(block_cols, t, tag_t))
  })
  val width = block_cols
  val latency = mvin_scale_args match {
    case Some(ScaleArguments(_, latency, _, _, _, _)) => latency
    case None => 0
  }
  // Holding register for the current request; `repeats` counts down as rows are emitted
  val in = Reg(Valid(new VectorScalarMultiplierReq(block_cols, t, u, tag_t)))
  val in_fire = WireInit(false.B)
  // A new request is accepted when the holding reg is empty, or on its final repeat
  io.req.ready := !in.valid || (in.bits.repeats === 0.U && in_fire)
  when (io.req.fire) {
    in.valid := io.req.valid
    in.bits := io.req.bits
  } .elsewhen (in_fire) {
    when (in.bits.repeats === 0.U) {
      in.valid := false.B
    }
    in.bits.repeats := in.bits.repeats - 1.U
  }
  when (reset.asBool) {
    in.valid := false.B
  }
  if (num_scale_units == -1) {
    // Fully-parallel path: scale every column combinationally, then delay by `latency`
    val pipe = Module(new Pipeline[VectorScalarMultiplierResp[T, Tag]](
      new VectorScalarMultiplierResp(block_cols, t, tag_t),
      latency
    )())
    io.resp <> pipe.io.out
    in_fire := pipe.io.in.fire
    pipe.io.in.valid := in.valid
    pipe.io.in.bits.tag := in.bits.tag
    pipe.io.in.bits.last := in.bits.repeats === 0.U && in.bits.last
    pipe.io.in.bits.row := in.bits.repeats
    pipe.io.in.bits.out := (mvin_scale_args match {
      case Some(ScaleArguments(mvin_scale_func, _, multiplicand_t, _, _, _)) =>
        in.bits.in.map(x => mvin_scale_func(x, in.bits.scale.asTypeOf(multiplicand_t)))
      case None => in.bits.in
    })
  } else {
    // Shared-unit path: a circular 3-entry buffer tracked by one-hot head/tail pointers.
    // fired_masks marks columns dispatched to a scale unit; completed_masks marks columns
    // whose results have been written back.
    val nEntries = 3
    val regs = Reg(Vec(nEntries, Valid(new VectorScalarMultiplierReq(block_cols, t, u, tag_t))))
    val out_regs = Reg(Vec(nEntries, new VectorScalarMultiplierResp(block_cols, t, tag_t)))
    val fired_masks = Reg(Vec(nEntries, Vec(width, Bool())))
    val completed_masks = Reg(Vec(nEntries, Vec(width, Bool())))
    val head_oh = RegInit(1.U(nEntries.W))
    val tail_oh = RegInit(1.U(nEntries.W))
    // Head entry is ready to respond once every column has completed
    io.resp.valid := Mux1H(head_oh.asBools, (regs zip completed_masks).map({case (r,c) => r.valid && c.reduce(_&&_)}))
    io.resp.bits := Mux1H(head_oh.asBools, out_regs)
    when (io.resp.fire) {
      for (i <- 0 until nEntries) {
        when (head_oh(i)) {
          regs(i).valid := false.B
        }
      }
      // Rotate the one-hot head pointer
      head_oh := (head_oh << 1) | head_oh(nEntries-1)
    }
    // Enqueue into the buffer when the tail slot is free
    in_fire := (in.valid &&
      (!Mux1H(tail_oh.asBools, regs.map(_.valid)))
    )
    when (in_fire) {
      for (i <- 0 until nEntries) {
        when (tail_oh(i)) {
          regs(i).valid := true.B
          regs(i).bits := in.bits
          out_regs(i).tag := in.bits.tag
          out_regs(i).last := in.bits.repeats === 0.U && in.bits.last
          out_regs(i).row := in.bits.repeats
          out_regs(i).out := in.bits.in
          val identity = (u match {
            case u: UInt => Arithmetic.UIntArithmetic.cast(u).identity
            case s: SInt => Arithmetic.SIntArithmetic.cast(s).identity
            case f: Float => Arithmetic.FloatArithmetic.cast(f).identity
            case b: Bool => 1.U(1.W)
          })
          // Identity scales skip the scale units entirely: mark all columns done up front
          fired_masks(i).foreach(_ := in.bits.scale.asUInt === identity.asUInt || always_identity.B)
          completed_masks(i).foreach(_ := in.bits.scale.asUInt === identity.asUInt || always_identity.B)
        }
      }
      tail_oh := (tail_oh << 1) | tail_oh(nEntries-1)
    }
    // One request stream per (entry, column) pair, competing for the shared scale units
    val inputs = Seq.fill(width*nEntries) { Wire(Decoupled(new DataWithIndex(t, u))) }
    for (i <- 0 until nEntries) {
      for (w <- 0 until width) {
        val input = inputs(i*width+w)
        input.valid := regs(i).valid && !fired_masks(i)(w)
        input.bits.data := regs(i).bits.in(w)
        input.bits.scale := regs(i).bits.scale.asTypeOf(u)
        input.bits.id := i.U
        input.bits.index := w.U
        when (input.fire) {
          fired_masks(i)(w) := true.B
        }
      }
    }
    // Statically stripe the (entry, column) streams across the scale units, round-robin
    // arbitrating within each unit's stripe
    for (i <- 0 until num_scale_units) {
      val arbIn = inputs.zipWithIndex.filter({ case (_, w) => w % num_scale_units == i }).map(_._1)
      val arb = Module(new RRArbiter(new DataWithIndex(t, u), arbIn.length))
      arb.io.in <> arbIn
      arb.io.out.ready := true.B
      // Register the arbiter output before the scale pipe to break the timing path
      val arbOut = Reg(Valid(new DataWithIndex(t, u)))
      arbOut.valid := arb.io.out.valid
      arbOut.bits := arb.io.out.bits
      when (reset.asBool) {
        arbOut.valid := false.B
      }
      val pipe = Module(new ScalePipe(t, mvin_scale_args.get))
      pipe.io.in := arbOut
      val pipe_out = pipe.io.out
      // Write the scaled element back to its originating (entry, column) slot
      for (j <- 0 until nEntries) {
        for (w <- 0 until width) {
          if ((j*width+w) % num_scale_units == i) {
            when (pipe_out.fire && pipe_out.bits.id === j.U && pipe_out.bits.index === w.U) {
              out_regs(j).out(w) := pipe_out.bits.data
              completed_masks(j)(w) := true.B
            }
          }
        }
      }
    }
    when (reset.asBool) {
      regs.foreach(_.valid := false.B)
    }
  }
}
object VectorScalarMultiplier {
  // Returns the input and output IO of the module (together with the pipeline)
  /** Instantiate a [[VectorScalarMultiplier]] fronted by a 2-deep request queue.
    *
    * @return (queue enqueue port, multiplier response port)
    */
  def apply[T <: Data, U <: Data, Tag <: Data](
    scale_args: Option[ScaleArguments[T, U]],
    t: T, cols: Int, tag_t: Tag,
    is_acc: Boolean,
    is_mvin: Boolean=true
  ) = {
    // Elaboration-time sanity check: accumulator moves must be mvin moves
    assert(!is_acc || is_mvin)
    val vsm = Module(new VectorScalarMultiplier(scale_args, cols, t, tag_t))
    val vsm_in_q = Module(new Queue(chiselTypeOf(vsm.io.req.bits), 2))
    vsm.io.req <> vsm_in_q.io.deq
    (vsm_in_q.io.enq, vsm.io.resp)
  }
}
File LocalAddr.scala:
package gemmini
import chisel3._
import chisel3.util._
/** A 32-bit local address into either the scratchpad or the accumulator, packed as
  * {is_acc_addr, accumulate, read_full_acc_row, norm_cmd, garbage, garbage_bit, data}.
  * The all-ones accumulator address (with metadata bits set) is the "garbage" address.
  * NOTE: field declaration order defines the hardware bit layout — do not reorder.
  */
class LocalAddr(sp_banks: Int, sp_bank_entries: Int, acc_banks: Int, acc_bank_entries: Int) extends Bundle {
  private val localAddrBits = 32 // TODO magic number
  private val spAddrBits = log2Ceil(sp_banks * sp_bank_entries)
  private val accAddrBits = log2Ceil(acc_banks * acc_bank_entries)
  private val maxAddrBits = spAddrBits max accAddrBits
  private val spBankBits = log2Up(sp_banks)
  private val spBankRowBits = log2Up(sp_bank_entries)
  private val accBankBits = log2Up(acc_banks)
  val accBankRowBits = log2Up(acc_bank_entries)
  val spRows = sp_banks * sp_bank_entries
  val is_acc_addr = Bool()          // true => accumulator address, false => scratchpad
  val accumulate = Bool()           // accumulate into (rather than overwrite) the destination
  val read_full_acc_row = Bool()    // read the full-width (unscaled) accumulator row
  val norm_cmd = NormCmd()
  private val metadata_w = is_acc_addr.getWidth + accumulate.getWidth + read_full_acc_row.getWidth + norm_cmd.getWidth
  // Metadata + address must leave at least one bit of headroom in the 32-bit word
  assert(maxAddrBits + metadata_w < 32)
  // Padding bits; garbage_bit exists only when there is room for it
  val garbage = UInt(((localAddrBits - maxAddrBits - metadata_w - 1) max 0).W)
  val garbage_bit = if (localAddrBits - maxAddrBits >= metadata_w + 1) UInt(1.W) else UInt(0.W)
  val data = UInt(maxAddrBits.W)
  // Bank/row decompositions of `data` for scratchpad and accumulator views
  def sp_bank(dummy: Int = 0) = if (spAddrBits == spBankRowBits) 0.U else data(spAddrBits - 1, spBankRowBits)
  def sp_row(dummy: Int = 0) = data(spBankRowBits - 1, 0)
  def acc_bank(dummy: Int = 0) = if (accAddrBits == accBankRowBits) 0.U else data(accAddrBits - 1, accBankRowBits)
  def acc_row(dummy: Int = 0) = data(accBankRowBits - 1, 0)
  def full_sp_addr(dummy: Int = 0) = data(spAddrBits - 1, 0)
  def full_acc_addr(dummy: Int = 0) = data(accAddrBits - 1, 0)
  def is_same_address(other: LocalAddr): Bool = is_acc_addr === other.is_acc_addr && data === other.data
  def is_same_address(other: UInt): Bool = is_same_address(other.asTypeOf(this))
  // The garbage address: all metadata bits set and data all-ones
  def is_garbage(dummy: Int = 0) = is_acc_addr && accumulate && read_full_acc_row && data.andR &&
    (if (garbage_bit.getWidth > 0) garbage_bit.asBool else true.B)
  // Wrapping add on `data` only; metadata is preserved
  def +(other: UInt) = {
    require(isPow2(sp_bank_entries)) // TODO remove this requirement
    require(isPow2(acc_bank_entries)) // TODO remove this requirement
    val result = WireInit(this)
    result.data := data + other
    result
  }
  // Comparisons are only meaningful between addresses of the same memory space
  def <=(other: LocalAddr) =
    is_acc_addr === other.is_acc_addr &&
      Mux(is_acc_addr, full_acc_addr() <= other.full_acc_addr(), full_sp_addr() <= other.full_sp_addr())
  def <(other: LocalAddr) =
    is_acc_addr === other.is_acc_addr &&
      Mux(is_acc_addr, full_acc_addr() < other.full_acc_addr(), full_sp_addr() < other.full_sp_addr())
  def >(other: LocalAddr) =
    is_acc_addr === other.is_acc_addr &&
      Mux(is_acc_addr, full_acc_addr() > other.full_acc_addr(), full_sp_addr() > other.full_sp_addr())
  // Returns (wrapped sum, overflow-out-of-address-space flag)
  def add_with_overflow(other: UInt): Tuple2[LocalAddr, Bool] = {
    require(isPow2(sp_bank_entries)) // TODO remove this requirement
    require(isPow2(acc_bank_entries)) // TODO remove this requirement
    val sum = data +& other
    val overflow = Mux(is_acc_addr, sum(accAddrBits), sum(spAddrBits))
    val result = WireInit(this)
    result.data := sum(maxAddrBits - 1, 0)
    (result, overflow)
  }
  // This function can only be used with non-accumulator addresses. Returns both new address and underflow
  def floorSub(other: UInt, floor: UInt): (LocalAddr, Bool) = {
    require(isPow2(sp_bank_entries)) // TODO remove this requirement
    require(isPow2(acc_bank_entries)) // TODO remove this requirement
    val underflow = data < (floor +& other)
    val result = WireInit(this)
    result.data := Mux(underflow, floor, data - other)
    (result, underflow)
  }
  // Overwrite this wire with the canonical garbage address
  def make_this_garbage(dummy: Int = 0): Unit = {
    is_acc_addr := true.B
    accumulate := true.B
    read_full_acc_row := true.B
    garbage_bit := 1.U
    data := ~(0.U(maxAddrBits.W))
  }
}
/** Conversion helpers between raw bits and [[LocalAddr]] views. */
object LocalAddr {
  def cast_to_local_addr[T <: Data](local_addr_t: LocalAddr, t: T): LocalAddr = {
    // This convenience function is basically the same as calling "asTypeOf(local_addr_t)". However, this convenience
    // function will also cast unnecessary garbage bits to 0, which may help reduce multiplier/adder bitwidths
    val result = WireInit(t.asTypeOf(local_addr_t))
    if (result.garbage_bit.getWidth > 0) result.garbage := 0.U
    result
  }
  def cast_to_sp_addr[T <: Data](local_addr_t: LocalAddr, t: T): LocalAddr = {
    // This function is a wrapper around cast_to_local_addr, but it assumes that the input will not be the garbage
    // address
    val result = WireInit(cast_to_local_addr(local_addr_t, t))
    // Force the metadata to the scratchpad-read configuration
    result.is_acc_addr := false.B
    result.accumulate := false.B
    result.read_full_acc_row := false.B
    // assert(!result.garbage_bit, "cast_to_sp_addr doesn't work on garbage addresses")
    result
  }
  def cast_to_acc_addr[T <: Data](local_addr_t: LocalAddr, t: T, accumulate: Bool, read_full: Bool): LocalAddr = {
    // This function is a wrapper around cast_to_local_addr, but it assumes that the input will not be the garbage
    // address
    val result = WireInit(cast_to_local_addr(local_addr_t, t))
    // Force the metadata to the requested accumulator configuration
    result.is_acc_addr := true.B
    result.accumulate := accumulate
    result.read_full_acc_row := read_full
    // assert(!result.garbage_bit, "cast_to_acc_addr doesn't work on garbage addresses")
    result
  }
  /** Build a wire holding the canonical garbage address of the given shape. */
  def garbage_addr(local_addr_t: LocalAddr): LocalAddr = {
    val result = Wire(chiselTypeOf(local_addr_t))
    result := DontCare
    result.make_this_garbage()
    result
  }
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
  /** [[LazyModule]] that contains this instance. */
  val wrapper: LazyModule
  /** IOs that will be automatically "punched" for this instance. */
  val auto: AutoBundle
  /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
  protected[diplomacy] val dangles: Seq[Dangle]
  // [[wrapper.module]] had better not be accessed while LazyModules are still being built!
  require(
    LazyModule.scope.isEmpty,
    s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
  )
  /** Set module name. Defaults to the containing LazyModule's desiredName. */
  override def desiredName: String = wrapper.desiredName
  suggestName(wrapper.suggestedName)
  /** [[Parameters]] for chisel [[Module]]s. */
  implicit val p: Parameters = wrapper.p
  /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
    * submodules.
    */
  protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
    // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
    // 2. return [[Dangle]]s from each module.
    val childDangles = wrapper.children.reverse.flatMap { c =>
      implicit val sourceInfo: SourceInfo = c.info
      c.cloneProto.map { cp =>
        // If the child is a clone, then recursively set cloneProto of its children as well
        def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
          require(bases.size == clones.size)
          (bases.zip(clones)).map { case (l, r) =>
            require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
            l.cloneProto = Some(r)
            assignCloneProtos(l.children, r.children)
          }
        }
        assignCloneProtos(c.children, cp.children)
        // Clone the child module as a record, and get its [[AutoBundle]]
        val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
        val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
        // Get the empty [[Dangle]]'s of the cloned child
        val rawDangles = c.cloneDangles()
        require(rawDangles.size == clonedAuto.elements.size)
        // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
        val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
        dangles
      }.getOrElse {
        // For non-clones, instantiate the child module
        val mod = try {
          Module(c.module)
        } catch {
          case e: ChiselException => {
            println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
            throw e
          }
        }
        mod.dangles
      }
    }
    // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
    // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
    val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
    // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
    val allDangles = nodeDangles ++ childDangles
    // Group [[allDangles]] by their [[source]].
    val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
    // For each [[source]] set of [[Dangle]]s of size 2, ensure that these
    // can be connected as a source-sink pair (have opposite flipped value).
    // Make the connection and mark them as [[done]].
    val done = Set() ++ pairing.values.filter(_.size == 2).map {
      case Seq(a, b) =>
        require(a.flipped != b.flipped)
        // @todo <> in chisel3 makes directionless connection.
        if (a.flipped) {
          a.data <> b.data
        } else {
          b.data <> a.data
        }
        a.source
      case _ => None
    }
    // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
    val forward = allDangles.filter(d => !done(d.source))
    // Generate [[AutoBundle]] IO from [[forward]].
    val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
    // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
    val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
      if (d.flipped) {
        d.data <> io
      } else {
        io <> d.data
      }
      // Prefix the dangle name with this wrapper's name so it stays unique as it propagates upward
      d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
    }
    // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
    wrapper.inModuleBody.reverse.foreach {
      _()
    }
    if (wrapper.shouldBeInlined) {
      chisel3.experimental.annotate(new ChiselAnnotation {
        def toFirrtl = InlineAnnotation(toNamed)
      })
    }
    // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
    (auto, dangles)
  }
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
  * Uses the implicit clock and reset of the surrounding [[Module]] context.
  *
  * @param wrapper
  *   the [[LazyModule]] from which the `.module` call is being made.
  */
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
  /** Instantiate hardware of this `Module`. */
  val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
  * Unlike [[LazyModuleImp]], it has no implicit clock/reset of its own; see `childClock`/`childReset`.
  *
  * @param wrapper
  *   the [[LazyModule]] from which the `.module` call is being made.
  */
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
  // These wires are the default clock+reset for all LazyModule children.
  // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
  // [[LazyRawModuleImp]] children.
  // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
  /** drive clock explicitly. */
  val childClock: Clock = Wire(Clock())
  /** drive reset explicitly. */
  val childReset: Reset = Wire(Reset())
  // the default is that these are disabled: a constant-low clock and a DontCare reset
  childClock := false.B.asClock
  childReset := chisel3.DontCare
  /** Override to true to elaborate children under childClock/childReset. */
  def provideImplicitClockToLazyChildren: Boolean = false
  val (auto, dangles) =
    if (provideImplicitClockToLazyChildren) {
      withClockAndReset(childClock, childReset) { instantiate() }
    } else {
      instantiate()
    }
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
/** Raw bits of a float with `expWidth` exponent bits and `sigWidth` significand bits. */
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
  val bits = UInt((expWidth + sigWidth).W)
  val bias: Int = (1 << (expWidth-1)) - 1 // exponent bias: 2^(expWidth-1) - 1
}
/** A `w`-bit placeholder integer bundle with a convenience all-zeros value. */
case class DummySInt(w: Int) extends Bundle {
  val bits = UInt(w.W)
  // A wire whose payload is tied to zero (used where the value is irrelevant)
  def dontCare: DummySInt = {
    val o = Wire(new DummySInt(w))
    o.bits := 0.U
    o
  }
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
/** Type class supplying arithmetic operations for a Chisel data type `T`. */
abstract class Arithmetic[T <: Data] {
  implicit def cast(t: T): ArithmeticOps[T]
}
/** Operations available on a value of an arithmetic-capable type.
  * The optional members return `None` unless the type opts in.
  */
abstract class ArithmeticOps[T <: Data](self: T) {
  def *(t: T): T
  def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
  def +(t: T): T
  def -(t: T): T
  def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
  def >(t: T): Bool
  def identity: T                 // multiplicative identity
  def withWidthOf(t: T): T        // resize to t's width (truncating/sign-extending)
  def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
  def relu: T
  def zero: T                     // additive identity
  def minimum: T                  // smallest representable value
  // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
  def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
  def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
  def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
  def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
  /** Arithmetic instance for unsigned integers. */
  implicit object UIntArithmetic extends Arithmetic[UInt] {
    override implicit def cast(self: UInt) = new ArithmeticOps(self) {
      override def *(t: UInt) = self * t
      override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
      override def +(t: UInt) = self + t
      override def -(t: UInt) = self - t
      override def >>(u: UInt) = {
        // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
        // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
        // Round-half-to-even on the shifted-out bits (vxrm "rne"-style rounding)
        val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
        val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
        val ones_digit = self(u)
        val r = point_five & (zeros | ones_digit)
        (self >> u).asUInt + r
      }
      override def >(t: UInt): Bool = self > t
      override def withWidthOf(t: UInt) = self.asTypeOf(t)
      // Saturates at the max value of a SIGNED t-width integer (matches accumulator semantics)
      override def clippedToWidthOf(t: UInt) = {
        val sat = ((1 << (t.getWidth-1))-1).U
        Mux(self > sat, sat, self)(t.getWidth-1, 0)
      }
      override def relu: UInt = self // unsigned values are already non-negative
      override def zero: UInt = 0.U
      override def identity: UInt = 1.U
      override def minimum: UInt = 0.U
    }
  }
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
/** Integer square root implemented by round-tripping through floating-point.
* Returns Some((input, output)): `input` is a 0-width Decoupled handshake that
* launches a sqrt of `self` on a hardfloat DivSqrtRecFN_small; `output` carries
* the truncated integer result.
*/
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
// The 0-width input carries only handshake information; the operand itself is `self`.
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
// sigWidth is the full input width, so the int -> float conversion loses no precision.
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
// Convert a signed integer into hardfloat's recoded floating-point format.
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
// Convert a recoded float back to a signed integer (round_minMag truncates toward zero).
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt unit
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare // sqrt is unary; the divisor port is unused
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
// The result is only presented for one cycle, so the consumer must already be ready.
assert(!output.valid || output.ready)
Some((input, output))
}
/** Computes 1/self in floating-point on a hardfloat divider.
* Only defined when `u` is a Float type (result is delivered in that format);
* returns None otherwise. `options` is forwarded to DivSqrtRecFN_small.
* Returns Some((input, output)): `input` is a 0-width Decoupled handshake that
* launches the division; `output` carries the reciprocal as a standard-format float.
*/
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
// The 0-width input carries only handshake information; the operand itself is `self`.
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S) // constant 1.0, used as the dividend
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
// Convert the recoded result back to standard format and reinterpret it as type U.
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
// The result is only presented for one cycle, so the consumer must already be ready.
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
/** Multiplies `self` by a previously-computed floating-point reciprocal (see
* `reciprocal` above), truncating the product back to a signed integer.
* If `reciprocal` is not a Float, `self` is returned unchanged.
*/
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
// Convert a signed integer into hardfloat's recoded floating-point format.
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
// Convert a recoded float back to a signed integer (round_minMag truncates toward zero).
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
/** Floating-point implementation of Gemmini's Arithmetic type-class.
* Values are held in standard IEEE-style format (`Float.bits`) and recoded
* into hardfloat's internal format around each operation.
*/
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
/** Product self * t, computed and rounded in self's format. */
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's format before multiplying
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
/** Fused multiply-accumulate: self + m1 * m2, in self's format. */
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
/** Sum self + t, computed as t * 1 + self on the fused multiply-adder. */
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float, to serve as the multiplicand in t * 1 + self
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
/** Difference self - t, implemented by flipping t's sign bit and adding. */
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
/** Scale down by a power of two: multiplies self by 2^(-u). */
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
/** Greater-than comparison (self > t), non-signaling. */
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
/** Rounds self into t's floating-point format. */
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
/** Converts self into t's format; currently identical to withWidthOf (no saturation). */
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
/** ReLU: negative non-zero values clamp to +0; everything else passes through. */
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
// +0.0 (all-zero bit pattern)
override def zero: Float = 0.U.asTypeOf(self)
// +1.0: sign 0, exponent equal to the bias (01...1), zero mantissa
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
// Negative infinity: sign 1, all-ones exponent, zero mantissa
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
/** Stub arithmetic for DummySInt: every operation yields a don't-care value.
* Useful when the datapath must elaborate but its results are never consumed.
*/
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
  override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
    // The comparison is the only op with a concrete result; it is always false.
    override def >(t: DummySInt): Bool = false.B
    // Arithmetic ops all produce don't-care hardware.
    override def *(t: DummySInt) = self.dontCare
    override def +(t: DummySInt) = self.dontCare
    override def -(t: DummySInt) = self.dontCare
    override def >>(t: UInt) = self.dontCare
    override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
    // Width-conversion ops.
    override def withWidthOf(t: DummySInt) = self.dontCare
    override def clippedToWidthOf(t: DummySInt) = self.dontCare
    // Activation and constants.
    override def relu = self.dontCare
    override def zero = self.dontCare
    override def identity = self.dontCare
    override def minimum: DummySInt = self.dontCare
  }
}
}
File Scratchpad.scala:
package gemmini
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.rocket._
import freechips.rocketchip.tile._
import freechips.rocketchip.tilelink._
import Util._
/** DMA request to move data from main memory into the scratchpad/accumulator.
* Consumed by the Scratchpad module, which forwards it either to the
* StreamReader DMA engine or (when all_zeros is set) to a zero-writer.
* NOTE: field order defines the bundle's bit layout — do not reorder.
*/
class ScratchpadMemReadRequest[U <: Data](local_addr_t: LocalAddr, scale_t_bits: Int)(implicit p: Parameters) extends CoreBundle {
// Virtual address in main memory to read from
val vaddr = UInt(coreMaxAddrBits.W)
// Local scratchpad/accumulator destination address
val laddr = local_addr_t.cloneType
// Number of columns to move in (forwarded to the DMA reader as the request length)
val cols = UInt(16.W) // TODO don't use a magic number for the width here
// Row-repeat count, forwarded to the DMA reader
val repeats = UInt(16.W) // TODO don't use a magic number for the width here
// Scale factor applied by the move-in vector-scalar multiplier (raw bits of the scale type)
val scale = UInt(scale_t_bits.W)
// If set, the data uses the accumulator bitwidth and takes the acc move-in path
val has_acc_bitwidth = Bool()
// If set, skip the DMA read and have the zero-writer fill the destination with zeros
val all_zeros = Bool()
val block_stride = UInt(16.W) // TODO magic numbers
val pixel_repeats = UInt(8.W) // TODO magic numbers
// Tag returned in the read response so the requester can match completions
val cmd_id = UInt(8.W) // TODO don't use a magic number here
// Processor status forwarded with the DMA access (presumably for address translation — TODO confirm)
val status = new MStatus
}
/** DMA request to move data from the scratchpad/accumulator out to main memory.
* Consumed by the Scratchpad module, which reads the local address and forwards
* the data to the StreamWriter DMA engine.
* NOTE: field order defines the bundle's bit layout — do not reorder.
*/
class ScratchpadMemWriteRequest(local_addr_t: LocalAddr, acc_t_bits: Int, scale_t_bits: Int)
(implicit p: Parameters) extends CoreBundle {
// Virtual address in main memory to write to
val vaddr = UInt(coreMaxAddrBits.W)
// Local scratchpad/accumulator source address
val laddr = local_addr_t.cloneType
// Activation function selector applied on accumulator reads
val acc_act = UInt(Activation.bitwidth.W) // TODO don't use a magic number for the width here
// Scale factor applied on accumulator reads (raw bits of the scale type)
val acc_scale = UInt(scale_t_bits.W)
// Constants for the accumulator's integer GELU / exp activations (raw accType bits);
// NOTE(review): exact semantics are defined by the accumulator scale unit — confirm there
val acc_igelu_qb = UInt(acc_t_bits.W)
val acc_igelu_qc = UInt(acc_t_bits.W)
val acc_iexp_qln2 = UInt(acc_t_bits.W)
val acc_iexp_qln2_inv = UInt(acc_t_bits.W)
val acc_norm_stats_id = UInt(8.W) // TODO magic number
// Number of elements to write out
val len = UInt(16.W) // TODO don't use a magic number for the width here
val block = UInt(8.W) // TODO don't use a magic number for the width here
// Tag returned in the write response so the requester can match completions
val cmd_id = UInt(8.W) // TODO don't use a magic number here
// Processor status forwarded with the DMA access (presumably for address translation — TODO confirm)
val status = new MStatus
// Pooling variables
val pool_en = Bool()
val store_en = Bool()
}
/** Completion notification for a ScratchpadMemWriteRequest, echoing its cmd_id. */
class ScratchpadMemWriteResponse extends Bundle {
val cmd_id = UInt(8.W) // TODO don't use a magic number here
}
/** Completion notification for a ScratchpadMemReadRequest: how many bytes were
* read from main memory, plus the originating request's cmd_id.
*/
class ScratchpadMemReadResponse extends Bundle {
val bytesRead = UInt(16.W) // TODO magic number here
val cmd_id = UInt(8.W) // TODO don't use a magic number here
}
/** Request/response pair for DMA move-in (main memory -> scratchpad), from the
* requester's point of view: req is outbound, resp comes back Flipped.
*/
class ScratchpadReadMemIO[U <: Data](local_addr_t: LocalAddr, scale_t_bits: Int)(implicit p: Parameters) extends CoreBundle {
val req = Decoupled(new ScratchpadMemReadRequest(local_addr_t, scale_t_bits))
val resp = Flipped(Valid(new ScratchpadMemReadResponse))
}
/** Request/response pair for DMA move-out (scratchpad -> main memory), from the
* requester's point of view: req is outbound, resp comes back Flipped.
*/
class ScratchpadWriteMemIO(local_addr_t: LocalAddr, acc_t_bits: Int, scale_t_bits: Int)
(implicit p: Parameters) extends CoreBundle {
val req = Decoupled(new ScratchpadMemWriteRequest(local_addr_t, acc_t_bits, scale_t_bits))
val resp = Flipped(Valid(new ScratchpadMemWriteResponse))
}
/** Read request into a single scratchpad SRAM bank of n rows.
* fromDMA distinguishes DMA-initiated reads from ExecuteController reads so
* the response can be routed back to the right consumer.
*/
class ScratchpadReadReq(val n: Int) extends Bundle {
val addr = UInt(log2Ceil(n).W)
val fromDMA = Bool()
}
/** Read response from a scratchpad SRAM bank: one w-bit row, tagged with the
* originating request's fromDMA flag for routing.
*/
class ScratchpadReadResp(val w: Int) extends Bundle {
val data = UInt(w.W)
val fromDMA = Bool()
}
/** Decoupled read channel into a scratchpad bank (n rows of w bits):
* request out, response back Flipped.
*/
class ScratchpadReadIO(val n: Int, val w: Int) extends Bundle {
val req = Decoupled(new ScratchpadReadReq(n))
val resp = Flipped(Decoupled(new ScratchpadReadResp(w)))
}
/** Write channel into a scratchpad bank (n rows of w bits). Fire-and-forget:
* no handshake, a write happens whenever en is asserted. mask has one bit per
* mask_len-sized element of the row.
*/
class ScratchpadWriteIO(val n: Int, val w: Int, val mask_len: Int) extends Bundle {
val en = Output(Bool())
val addr = Output(UInt(log2Ceil(n).W))
val mask = Output(Vec(mask_len, Bool()))
val data = Output(UInt(w.W))
}
/** One scratchpad SRAM bank.
*
* @param n                  number of rows
* @param w                  row width in bits
* @param aligned_to         byte-alignment granularity; sets the write-mask element size
* @param single_ported      if true, reads and writes share a port and writes win
* @param use_shared_ext_mem if true, no SRAM is instantiated here; accesses go out over io.ext_mem
* @param is_dummy           if true, storage is elided: reads return 0, writes are dropped
*/
class ScratchpadBank(n: Int, w: Int, aligned_to: Int, single_ported: Boolean, use_shared_ext_mem: Boolean, is_dummy: Boolean) extends Module {
// This is essentially a pipelined SRAM with the ability to stall pipeline stages
require(w % aligned_to == 0 || w < aligned_to)
val mask_len = (w / (aligned_to * 8)) max 1 // How many mask bits are there?
val mask_elem = UInt((w min (aligned_to * 8)).W) // What datatype does each mask bit correspond to?
val io = IO(new Bundle {
val read = Flipped(new ScratchpadReadIO(n, w))
val write = Flipped(new ScratchpadWriteIO(n, w, mask_len))
val ext_mem = if (use_shared_ext_mem) Some(new ExtMemIO) else None
})
// Select one of three storage backends (dummy / external / local SRAM) and
// expose it through uniform read/write closures.
val (read, write) = if (is_dummy) {
// Dummy backend: no storage at all
def read(addr: UInt, ren: Bool): Data = 0.U
def write(addr: UInt, wdata: Vec[UInt], wmask: Vec[Bool]): Unit = { }
(read _, write _)
} else if (use_shared_ext_mem) {
// External backend: forward accesses over io.ext_mem
def read(addr: UInt, ren: Bool): Data = {
io.ext_mem.get.read_en := ren
io.ext_mem.get.read_addr := addr
io.ext_mem.get.read_data
}
// Default (no-write) drives; overridden by write() via last-connect semantics
io.ext_mem.get.write_en := false.B
io.ext_mem.get.write_addr := DontCare
io.ext_mem.get.write_data := DontCare
io.ext_mem.get.write_mask := DontCare
def write(addr: UInt, wdata: Vec[UInt], wmask: Vec[Bool]) = {
io.ext_mem.get.write_en := true.B
io.ext_mem.get.write_addr := addr
io.ext_mem.get.write_data := wdata.asUInt
io.ext_mem.get.write_mask := wmask.asUInt
}
(read _, write _)
} else {
// Local backend: a synchronous-read SRAM with per-element write masking
val mem = SyncReadMem(n, Vec(mask_len, mask_elem))
def read(addr: UInt, ren: Bool): Data = mem.read(addr, ren)
def write(addr: UInt, wdata: Vec[UInt], wmask: Vec[Bool]) = mem.write(addr, wdata, wmask)
(read _, write _)
}
// When the scratchpad is single-ported, the writes take precedence
val singleport_busy_with_write = single_ported.B && io.write.en
when (io.write.en) {
if (aligned_to >= w)
// Row narrower than the alignment unit: write the whole row (all mask bits set)
write(io.write.addr, io.write.data.asTypeOf(Vec(mask_len, mask_elem)), VecInit((~(0.U(mask_len.W))).asBools))
else
write(io.write.addr, io.write.data.asTypeOf(Vec(mask_len, mask_elem)), io.write.mask)
}
val raddr = io.read.req.bits.addr
val ren = io.read.req.fire
val rdata = if (single_ported) {
// Reads and writes must never collide on the single port
assert(!(ren && io.write.en))
read(raddr, ren && !io.write.en).asUInt
} else {
read(raddr, ren).asUInt
}
val fromDMA = io.read.req.bits.fromDMA
// Make a queue which buffers the result of an SRAM read if it can't immediately be consumed
// (SRAM read data appears one cycle after the request fires, hence the RegNexts)
val q = Module(new Queue(new ScratchpadReadResp(w), 1, true, true))
q.io.enq.valid := RegNext(ren)
q.io.enq.bits.data := rdata
q.io.enq.bits.fromDMA := RegNext(fromDMA)
// Only accept a new read if the buffer is guaranteed to have room next cycle
val q_will_be_empty = (q.io.count +& q.io.enq.fire) - q.io.deq.fire === 0.U
io.read.req.ready := q_will_be_empty && !singleport_busy_with_write
io.read.resp <> q.io.deq
}
class Scratchpad[T <: Data, U <: Data, V <: Data](config: GemminiArrayConfig[T, U, V])
(implicit p: Parameters, ev: Arithmetic[T]) extends LazyModule {
import config._
import ev._
val maxBytes = dma_maxbytes
val dataBits = dma_buswidth
val block_rows = meshRows * tileRows
val block_cols = meshColumns * tileColumns
val spad_w = inputType.getWidth * block_cols
val acc_w = accType.getWidth * block_cols
val id_node = TLIdentityNode()
val xbar_node = TLXbar()
val reader = LazyModule(new StreamReader(config, max_in_flight_mem_reqs, dataBits, maxBytes, spad_w, acc_w, aligned_to,
sp_banks * sp_bank_entries, acc_banks * acc_bank_entries, block_rows, use_tlb_register_filter,
use_firesim_simulation_counters))
val writer = LazyModule(new StreamWriter(max_in_flight_mem_reqs, dataBits, maxBytes,
if (acc_read_full_width) acc_w else spad_w, aligned_to, inputType, block_cols, use_tlb_register_filter,
use_firesim_simulation_counters))
// TODO make a cross-bar vs two separate ports a config option
// id_node :=* reader.node
// id_node :=* writer.node
xbar_node := TLBuffer() := reader.node // TODO
xbar_node := TLBuffer() := writer.node
id_node := TLWidthWidget(config.dma_buswidth/8) := TLBuffer() := xbar_node
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with HasCoreParameters {
val io = IO(new Bundle {
// DMA ports
val dma = new Bundle {
val read = Flipped(new ScratchpadReadMemIO(local_addr_t, mvin_scale_t_bits))
val write = Flipped(new ScratchpadWriteMemIO(local_addr_t, accType.getWidth, acc_scale_t_bits))
}
// SRAM ports
val srams = new Bundle {
val read = Flipped(Vec(sp_banks, new ScratchpadReadIO(sp_bank_entries, spad_w)))
val write = Flipped(Vec(sp_banks, new ScratchpadWriteIO(sp_bank_entries, spad_w, (spad_w / (aligned_to * 8)) max 1)))
}
// Accumulator ports
val acc = new Bundle {
val read_req = Flipped(Vec(acc_banks, Decoupled(new AccumulatorReadReq(
acc_bank_entries, accType, acc_scale_t.asInstanceOf[V]
))))
val read_resp = Vec(acc_banks, Decoupled(new AccumulatorScaleResp(
Vec(meshColumns, Vec(tileColumns, inputType)),
Vec(meshColumns, Vec(tileColumns, accType))
)))
val write = Flipped(Vec(acc_banks, Decoupled(new AccumulatorWriteReq(
acc_bank_entries, Vec(meshColumns, Vec(tileColumns, accType))
))))
}
val ext_mem = if (use_shared_ext_mem) {
Some(new ExtSpadMemIO(sp_banks, acc_banks, acc_sub_banks))
} else {
None
}
// TLB ports
val tlb = Vec(2, new FrontendTLBIO)
// Misc. ports
val busy = Output(Bool())
val flush = Input(Bool())
val counter = new CounterEventIO()
})
val write_dispatch_q = Queue(io.dma.write.req)
// Write norm/scale queues are necessary to maintain in-order requests to accumulator norm/scale units
// Writes from main SPAD just flow directly between scale_q and issue_q, while writes
// From acc are ordered
val write_norm_q = Module(new Queue(new ScratchpadMemWriteRequest(local_addr_t, accType.getWidth, acc_scale_t_bits), spad_read_delay+2))
val write_scale_q = Module(new Queue(new ScratchpadMemWriteRequest(local_addr_t, accType.getWidth, acc_scale_t_bits), spad_read_delay+2))
val write_issue_q = Module(new Queue(new ScratchpadMemWriteRequest(local_addr_t, accType.getWidth, acc_scale_t_bits), spad_read_delay+1, pipe=true))
val read_issue_q = Module(new Queue(new ScratchpadMemReadRequest(local_addr_t, mvin_scale_t_bits), spad_read_delay+1, pipe=true)) // TODO can't this just be a normal queue?
write_dispatch_q.ready := false.B
write_norm_q.io.enq.valid := false.B
write_norm_q.io.enq.bits := write_dispatch_q.bits
write_norm_q.io.deq.ready := false.B
write_scale_q.io.enq.valid := false.B
write_scale_q.io.enq.bits := write_norm_q.io.deq.bits
write_scale_q.io.deq.ready := false.B
write_issue_q.io.enq.valid := false.B
write_issue_q.io.enq.bits := write_scale_q.io.deq.bits
// Garbage can immediately fire from dispatch_q -> norm_q
when (write_dispatch_q.bits.laddr.is_garbage()) {
write_norm_q.io.enq <> write_dispatch_q
}
// Non-acc or garbage can immediately fire between norm_q and scale_q
when (write_norm_q.io.deq.bits.laddr.is_garbage() || !write_norm_q.io.deq.bits.laddr.is_acc_addr) {
write_scale_q.io.enq <> write_norm_q.io.deq
}
// Non-acc or garbage can immediately fire between scale_q and issue_q
when (write_scale_q.io.deq.bits.laddr.is_garbage() || !write_scale_q.io.deq.bits.laddr.is_acc_addr) {
write_issue_q.io.enq <> write_scale_q.io.deq
}
val writeData = Wire(Valid(UInt((spad_w max acc_w).W)))
writeData.valid := write_issue_q.io.deq.bits.laddr.is_garbage()
writeData.bits := DontCare
val fullAccWriteData = Wire(UInt(acc_w.W))
fullAccWriteData := DontCare
val writeData_is_full_width = !write_issue_q.io.deq.bits.laddr.is_garbage() &&
write_issue_q.io.deq.bits.laddr.is_acc_addr && write_issue_q.io.deq.bits.laddr.read_full_acc_row
val writeData_is_all_zeros = write_issue_q.io.deq.bits.laddr.is_garbage()
writer.module.io.req.valid := write_issue_q.io.deq.valid && writeData.valid
write_issue_q.io.deq.ready := writer.module.io.req.ready && writeData.valid
writer.module.io.req.bits.vaddr := write_issue_q.io.deq.bits.vaddr
writer.module.io.req.bits.len := Mux(writeData_is_full_width,
write_issue_q.io.deq.bits.len * (accType.getWidth / 8).U,
write_issue_q.io.deq.bits.len * (inputType.getWidth / 8).U)
writer.module.io.req.bits.data := MuxCase(writeData.bits, Seq(
writeData_is_all_zeros -> 0.U,
writeData_is_full_width -> fullAccWriteData
))
writer.module.io.req.bits.block := write_issue_q.io.deq.bits.block
writer.module.io.req.bits.status := write_issue_q.io.deq.bits.status
writer.module.io.req.bits.pool_en := write_issue_q.io.deq.bits.pool_en
writer.module.io.req.bits.store_en := write_issue_q.io.deq.bits.store_en
io.dma.write.resp.valid := false.B
io.dma.write.resp.bits.cmd_id := write_dispatch_q.bits.cmd_id
when (write_dispatch_q.bits.laddr.is_garbage() && write_dispatch_q.fire) {
io.dma.write.resp.valid := true.B
}
read_issue_q.io.enq <> io.dma.read.req
val zero_writer = Module(new ZeroWriter(config, new ScratchpadMemReadRequest(local_addr_t, mvin_scale_t_bits)))
when (io.dma.read.req.bits.all_zeros) {
read_issue_q.io.enq.valid := false.B
io.dma.read.req.ready := zero_writer.io.req.ready
}
zero_writer.io.req.valid := io.dma.read.req.valid && io.dma.read.req.bits.all_zeros
zero_writer.io.req.bits.laddr := io.dma.read.req.bits.laddr
zero_writer.io.req.bits.cols := io.dma.read.req.bits.cols
zero_writer.io.req.bits.block_stride := io.dma.read.req.bits.block_stride
zero_writer.io.req.bits.tag := io.dma.read.req.bits
val zero_writer_pixel_repeater = Module(new PixelRepeater(inputType, local_addr_t, block_cols, aligned_to, new ScratchpadMemReadRequest(local_addr_t, mvin_scale_t_bits), passthrough = !has_first_layer_optimizations))
zero_writer_pixel_repeater.io.req.valid := zero_writer.io.resp.valid
zero_writer_pixel_repeater.io.req.bits.in := 0.U.asTypeOf(Vec(block_cols, inputType))
zero_writer_pixel_repeater.io.req.bits.laddr := zero_writer.io.resp.bits.laddr
zero_writer_pixel_repeater.io.req.bits.len := zero_writer.io.resp.bits.tag.cols
zero_writer_pixel_repeater.io.req.bits.pixel_repeats := zero_writer.io.resp.bits.tag.pixel_repeats
zero_writer_pixel_repeater.io.req.bits.last := zero_writer.io.resp.bits.last
zero_writer_pixel_repeater.io.req.bits.tag := zero_writer.io.resp.bits.tag
zero_writer_pixel_repeater.io.req.bits.mask := {
val n = inputType.getWidth / 8
val mask = zero_writer.io.resp.bits.mask
val expanded = VecInit(mask.flatMap(e => Seq.fill(n)(e)))
expanded
}
zero_writer.io.resp.ready := zero_writer_pixel_repeater.io.req.ready
zero_writer_pixel_repeater.io.resp.ready := false.B
reader.module.io.req.valid := read_issue_q.io.deq.valid
read_issue_q.io.deq.ready := reader.module.io.req.ready
reader.module.io.req.bits.vaddr := read_issue_q.io.deq.bits.vaddr
reader.module.io.req.bits.spaddr := Mux(read_issue_q.io.deq.bits.laddr.is_acc_addr,
read_issue_q.io.deq.bits.laddr.full_acc_addr(), read_issue_q.io.deq.bits.laddr.full_sp_addr())
reader.module.io.req.bits.len := read_issue_q.io.deq.bits.cols
reader.module.io.req.bits.repeats := read_issue_q.io.deq.bits.repeats
reader.module.io.req.bits.pixel_repeats := read_issue_q.io.deq.bits.pixel_repeats
reader.module.io.req.bits.scale := read_issue_q.io.deq.bits.scale
reader.module.io.req.bits.is_acc := read_issue_q.io.deq.bits.laddr.is_acc_addr
reader.module.io.req.bits.accumulate := read_issue_q.io.deq.bits.laddr.accumulate
reader.module.io.req.bits.has_acc_bitwidth := read_issue_q.io.deq.bits.has_acc_bitwidth
reader.module.io.req.bits.block_stride := read_issue_q.io.deq.bits.block_stride
reader.module.io.req.bits.status := read_issue_q.io.deq.bits.status
reader.module.io.req.bits.cmd_id := read_issue_q.io.deq.bits.cmd_id
val (mvin_scale_in, mvin_scale_out) = VectorScalarMultiplier(
config.mvin_scale_args,
config.inputType, config.meshColumns * config.tileColumns, chiselTypeOf(reader.module.io.resp.bits),
is_acc = false
)
val (mvin_scale_acc_in, mvin_scale_acc_out) = if (mvin_scale_shared) (mvin_scale_in, mvin_scale_out) else (
VectorScalarMultiplier(
config.mvin_scale_acc_args,
config.accType, config.meshColumns * config.tileColumns, chiselTypeOf(reader.module.io.resp.bits),
is_acc = true
)
)
mvin_scale_in.valid := reader.module.io.resp.valid && (mvin_scale_shared.B || !reader.module.io.resp.bits.is_acc ||
(reader.module.io.resp.bits.is_acc && !reader.module.io.resp.bits.has_acc_bitwidth))
mvin_scale_in.bits.in := reader.module.io.resp.bits.data.asTypeOf(chiselTypeOf(mvin_scale_in.bits.in))
mvin_scale_in.bits.scale := reader.module.io.resp.bits.scale.asTypeOf(mvin_scale_t)
mvin_scale_in.bits.repeats := reader.module.io.resp.bits.repeats
mvin_scale_in.bits.pixel_repeats := reader.module.io.resp.bits.pixel_repeats
mvin_scale_in.bits.last := reader.module.io.resp.bits.last
mvin_scale_in.bits.tag := reader.module.io.resp.bits
val mvin_scale_pixel_repeater = Module(new PixelRepeater(inputType, local_addr_t, block_cols, aligned_to, mvin_scale_out.bits.tag.cloneType, passthrough = !has_first_layer_optimizations))
mvin_scale_pixel_repeater.io.req.valid := mvin_scale_out.valid
mvin_scale_pixel_repeater.io.req.bits.in := mvin_scale_out.bits.out
mvin_scale_pixel_repeater.io.req.bits.mask := mvin_scale_out.bits.tag.mask take mvin_scale_pixel_repeater.io.req.bits.mask.size
mvin_scale_pixel_repeater.io.req.bits.laddr := mvin_scale_out.bits.tag.addr.asTypeOf(local_addr_t) + mvin_scale_out.bits.row
mvin_scale_pixel_repeater.io.req.bits.len := mvin_scale_out.bits.tag.len
mvin_scale_pixel_repeater.io.req.bits.pixel_repeats := mvin_scale_out.bits.tag.pixel_repeats
mvin_scale_pixel_repeater.io.req.bits.last := mvin_scale_out.bits.last
mvin_scale_pixel_repeater.io.req.bits.tag := mvin_scale_out.bits.tag
mvin_scale_out.ready := mvin_scale_pixel_repeater.io.req.ready
mvin_scale_pixel_repeater.io.resp.ready := false.B
if (!mvin_scale_shared) {
mvin_scale_acc_in.valid := reader.module.io.resp.valid &&
(reader.module.io.resp.bits.is_acc && reader.module.io.resp.bits.has_acc_bitwidth)
mvin_scale_acc_in.bits.in := reader.module.io.resp.bits.data.asTypeOf(chiselTypeOf(mvin_scale_acc_in.bits.in))
mvin_scale_acc_in.bits.scale := reader.module.io.resp.bits.scale.asTypeOf(mvin_scale_acc_t)
mvin_scale_acc_in.bits.repeats := reader.module.io.resp.bits.repeats
mvin_scale_acc_in.bits.pixel_repeats := 1.U
mvin_scale_acc_in.bits.last := reader.module.io.resp.bits.last
mvin_scale_acc_in.bits.tag := reader.module.io.resp.bits
mvin_scale_acc_out.ready := false.B
}
reader.module.io.resp.ready := Mux(reader.module.io.resp.bits.is_acc && reader.module.io.resp.bits.has_acc_bitwidth,
mvin_scale_acc_in.ready, mvin_scale_in.ready)
val mvin_scale_finished = mvin_scale_pixel_repeater.io.resp.fire && mvin_scale_pixel_repeater.io.resp.bits.last
val mvin_scale_acc_finished = mvin_scale_acc_out.fire && mvin_scale_acc_out.bits.last
val zero_writer_finished = zero_writer_pixel_repeater.io.resp.fire && zero_writer_pixel_repeater.io.resp.bits.last
val zero_writer_bytes_read = Mux(zero_writer_pixel_repeater.io.resp.bits.laddr.is_acc_addr,
zero_writer_pixel_repeater.io.resp.bits.tag.cols * (accType.getWidth / 8).U,
zero_writer_pixel_repeater.io.resp.bits.tag.cols * (inputType.getWidth / 8).U)
// For DMA read responses, mvin_scale gets first priority, then mvin_scale_acc, and then zero_writer
io.dma.read.resp.valid := mvin_scale_finished || mvin_scale_acc_finished || zero_writer_finished
// io.dma.read.resp.bits.cmd_id := MuxCase(zero_writer.io.resp.bits.tag.cmd_id, Seq(
io.dma.read.resp.bits.cmd_id := MuxCase(zero_writer_pixel_repeater.io.resp.bits.tag.cmd_id, Seq(
// mvin_scale_finished -> mvin_scale_out.bits.tag.cmd_id,
mvin_scale_finished -> mvin_scale_pixel_repeater.io.resp.bits.tag.cmd_id,
mvin_scale_acc_finished -> mvin_scale_acc_out.bits.tag.cmd_id))
io.dma.read.resp.bits.bytesRead := MuxCase(zero_writer_bytes_read, Seq(
// mvin_scale_finished -> mvin_scale_out.bits.tag.bytes_read,
mvin_scale_finished -> mvin_scale_pixel_repeater.io.resp.bits.tag.bytes_read,
mvin_scale_acc_finished -> mvin_scale_acc_out.bits.tag.bytes_read))
io.tlb(0) <> writer.module.io.tlb
io.tlb(1) <> reader.module.io.tlb
writer.module.io.flush := io.flush
reader.module.io.flush := io.flush
io.busy := writer.module.io.busy || reader.module.io.busy || write_issue_q.io.deq.valid || write_norm_q.io.deq.valid || write_scale_q.io.deq.valid || write_dispatch_q.valid
val spad_mems = {
val banks = Seq.fill(sp_banks) { Module(new ScratchpadBank(
sp_bank_entries, spad_w,
aligned_to, config.sp_singleported,
use_shared_ext_mem, is_dummy
)) }
val bank_ios = VecInit(banks.map(_.io))
// Reading from the SRAM banks
bank_ios.zipWithIndex.foreach { case (bio, i) =>
if (use_shared_ext_mem) {
io.ext_mem.get.spad(i) <> bio.ext_mem.get
}
val ex_read_req = io.srams.read(i).req
val exread = ex_read_req.valid
// TODO we tie the write dispatch queue's, and write issue queue's, ready and valid signals together here
val dmawrite = write_dispatch_q.valid && write_norm_q.io.enq.ready &&
!write_dispatch_q.bits.laddr.is_garbage() &&
!(bio.write.en && config.sp_singleported.B) &&
!write_dispatch_q.bits.laddr.is_acc_addr && write_dispatch_q.bits.laddr.sp_bank() === i.U
bio.read.req.valid := exread || dmawrite
ex_read_req.ready := bio.read.req.ready
// The ExecuteController gets priority when reading from SRAMs
when (exread) {
bio.read.req.bits.addr := ex_read_req.bits.addr
bio.read.req.bits.fromDMA := false.B
}.elsewhen (dmawrite) {
bio.read.req.bits.addr := write_dispatch_q.bits.laddr.sp_row()
bio.read.req.bits.fromDMA := true.B
when (bio.read.req.fire) {
write_dispatch_q.ready := true.B
write_norm_q.io.enq.valid := true.B
io.dma.write.resp.valid := true.B
}
}.otherwise {
bio.read.req.bits := DontCare
}
val dma_read_resp = Wire(Decoupled(new ScratchpadReadResp(spad_w)))
dma_read_resp.valid := bio.read.resp.valid && bio.read.resp.bits.fromDMA
dma_read_resp.bits := bio.read.resp.bits
val ex_read_resp = Wire(Decoupled(new ScratchpadReadResp(spad_w)))
ex_read_resp.valid := bio.read.resp.valid && !bio.read.resp.bits.fromDMA
ex_read_resp.bits := bio.read.resp.bits
val dma_read_pipe = Pipeline(dma_read_resp, spad_read_delay)
val ex_read_pipe = Pipeline(ex_read_resp, spad_read_delay)
bio.read.resp.ready := Mux(bio.read.resp.bits.fromDMA, dma_read_resp.ready, ex_read_resp.ready)
dma_read_pipe.ready := writer.module.io.req.ready &&
!write_issue_q.io.deq.bits.laddr.is_acc_addr && write_issue_q.io.deq.bits.laddr.sp_bank() === i.U && // I believe we don't need to check that write_issue_q is valid here, because if the SRAM's resp is valid, then that means that the write_issue_q's deq should also be valid
!write_issue_q.io.deq.bits.laddr.is_garbage()
when (dma_read_pipe.fire) {
writeData.valid := true.B
writeData.bits := dma_read_pipe.bits.data
}
io.srams.read(i).resp <> ex_read_pipe
}
// Writing to the SRAM banks
bank_ios.zipWithIndex.foreach { case (bio, i) =>
  // Write-port arbitration for scratchpad bank i. The when/elsewhen chain below gives the
  // ExecuteController priority over DMA mvin data, which in turn has priority over the zero-writer.
  val exwrite = io.srams.write(i).en

  // val laddr = mvin_scale_out.bits.tag.addr.asTypeOf(local_addr_t) + mvin_scale_out.bits.row
  val laddr = mvin_scale_pixel_repeater.io.resp.bits.laddr

  // DMA mvin data destined for this scratchpad bank (accumulator-addressed data is handled separately)
  // val dmaread = mvin_scale_out.valid && !mvin_scale_out.bits.tag.is_acc &&
  val dmaread = mvin_scale_pixel_repeater.io.resp.valid && !mvin_scale_pixel_repeater.io.resp.bits.tag.is_acc &&
    laddr.sp_bank() === i.U

  // We need to make sure that we don't try to return a dma read resp from both zero_writer and either mvin_scale
  // or mvin_acc_scale at the same time. The scalers always get priority in those cases
  /* val zerowrite = zero_writer.io.resp.valid && !zero_writer.io.resp.bits.laddr.is_acc_addr &&
    zero_writer.io.resp.bits.laddr.sp_bank() === i.U && */
  val zerowrite = zero_writer_pixel_repeater.io.resp.valid && !zero_writer_pixel_repeater.io.resp.bits.laddr.is_acc_addr &&
    zero_writer_pixel_repeater.io.resp.bits.laddr.sp_bank() === i.U &&
    // !((mvin_scale_out.valid && mvin_scale_out.bits.last) || (mvin_scale_acc_out.valid && mvin_scale_acc_out.bits.last))
    !((mvin_scale_pixel_repeater.io.resp.valid && mvin_scale_pixel_repeater.io.resp.bits.last) || (mvin_scale_acc_out.valid && mvin_scale_acc_out.bits.last))

  bio.write.en := exwrite || dmaread || zerowrite

  when (exwrite) {
    bio.write.addr := io.srams.write(i).addr
    bio.write.data := io.srams.write(i).data
    bio.write.mask := io.srams.write(i).mask
  }.elsewhen (dmaread) {
    bio.write.addr := laddr.sp_row()
    bio.write.data := mvin_scale_pixel_repeater.io.resp.bits.out.asUInt
    bio.write.mask := mvin_scale_pixel_repeater.io.resp.bits.mask take ((spad_w / (aligned_to * 8)) max 1)
    mvin_scale_pixel_repeater.io.resp.ready := true.B // TODO we combinationally couple valid and ready signals
  }.elsewhen (zerowrite) {
    // Zero-fill writes: data is all-zeros; the mask selects which bytes are cleared
    bio.write.addr := zero_writer_pixel_repeater.io.resp.bits.laddr.sp_row()
    bio.write.data := 0.U
    bio.write.mask := zero_writer_pixel_repeater.io.resp.bits.mask
    zero_writer_pixel_repeater.io.resp.ready := true.B // TODO we combinationally couple valid and ready signals
  }.otherwise {
    bio.write.addr := DontCare
    bio.write.data := DontCare
    bio.write.mask := DontCare
  }
}
banks
}
// Row types: one accumulator row (wide accType) and one scratchpad row (narrow inputType)
val acc_row_t = Vec(meshColumns, Vec(tileColumns, accType))
val spad_row_t = Vec(meshColumns, Vec(tileColumns, inputType))

// The normalizer sits between the accumulator-bank read path and the accumulator-scale unit.
// When the config has no normalizations it is constructed as a passthrough.
val (acc_norm_unit_in, acc_norm_unit_out) = Normalizer(
  is_passthru = !config.has_normalizations,
  max_len = block_cols,
  num_reduce_lanes = -1,
  num_stats = 2,
  latency = 4,
  fullDataType = acc_row_t,
  scale_t = acc_scale_t,
)

// Default: input invalid; the acc-bank read-response logic drives it valid (Chisel last-connect)
acc_norm_unit_in.valid := false.B
acc_norm_unit_in.bits.len := write_norm_q.io.deq.bits.len
acc_norm_unit_in.bits.stats_id := write_norm_q.io.deq.bits.acc_norm_stats_id
acc_norm_unit_in.bits.cmd := write_norm_q.io.deq.bits.laddr.norm_cmd
acc_norm_unit_in.bits.acc_read_resp := DontCare
// Scales accumulator values (and applies activations/normalization results) before they are
// either written out over the DMA or returned to the ExecuteController
val acc_scale_unit = Module(new AccumulatorScale(
  acc_row_t,
  spad_row_t,
  acc_scale_t.asInstanceOf[V],
  acc_read_small_width,
  acc_read_full_width,
  acc_scale_func,
  acc_scale_num_units,
  acc_scale_latency,
  has_nonlinear_activations,
  has_normalizations,
))

// A response is ready to be scaled when the head of write_scale_q holds a valid (non-garbage)
// accumulator address and write_issue_q has room to accept the forwarded request
val acc_waiting_to_be_scaled = write_scale_q.io.deq.valid &&
  !write_scale_q.io.deq.bits.laddr.is_garbage() &&
  write_scale_q.io.deq.bits.laddr.is_acc_addr &&
  write_issue_q.io.enq.ready

acc_norm_unit_out.ready := acc_scale_unit.io.in.ready && acc_waiting_to_be_scaled
acc_scale_unit.io.in.valid := acc_norm_unit_out.valid && acc_waiting_to_be_scaled
acc_scale_unit.io.in.bits := acc_norm_unit_out.bits

// Move the queue entry from write_scale_q to write_issue_q in lockstep with the scale unit's input
when (acc_scale_unit.io.in.fire) {
  write_issue_q.io.enq <> write_scale_q.io.deq
}

acc_scale_unit.io.out.ready := false.B

// The DMA writer can accept a scaled result when its request port is free and the head of
// write_issue_q is a valid (non-garbage) accumulator address
val dma_resp_ready =
  writer.module.io.req.ready &&
  write_issue_q.io.deq.bits.laddr.is_acc_addr &&
  !write_issue_q.io.deq.bits.laddr.is_garbage()

when (acc_scale_unit.io.out.bits.fromDMA && dma_resp_ready) {
  // Send the acc-scale result into the DMA
  acc_scale_unit.io.out.ready := true.B
  writeData.valid := acc_scale_unit.io.out.valid
  writeData.bits := acc_scale_unit.io.out.bits.data.asUInt
  fullAccWriteData := acc_scale_unit.io.out.bits.full_data.asUInt
}

for (i <- 0 until acc_banks) {
  // Send the acc-scale result to the ExController (non-DMA reads, routed by bank id)
  io.acc.read_resp(i).valid := false.B
  io.acc.read_resp(i).bits := acc_scale_unit.io.out.bits
  when (!acc_scale_unit.io.out.bits.fromDMA && acc_scale_unit.io.out.bits.acc_bank_id === i.U) {
    acc_scale_unit.io.out.ready := io.acc.read_resp(i).ready
    io.acc.read_resp(i).valid := acc_scale_unit.io.out.valid
  }
}
// Adder pipeline shared between the accumulator banks' accumulate (read-modify-write) ports
val acc_adders = Module(new AccPipeShared(acc_latency-1, acc_row_t, acc_banks))

// Instantiate the accumulator banks and wire up read/write arbitration for each of them
val acc_mems = {
  val banks = Seq.fill(acc_banks) { Module(new AccumulatorMem(
    acc_bank_entries, acc_row_t, acc_scale_func, acc_scale_t.asInstanceOf[V],
    acc_singleported, acc_sub_banks,
    use_shared_ext_mem,
    acc_latency, accType, is_dummy
  )) }
  val bank_ios = VecInit(banks.map(_.io))

  // Getting the output of the bank that's about to be issued to the writer
  // NOTE(review): bank_issued_io does not appear to be read within this block — confirm it is still needed
  val bank_issued_io = bank_ios(write_issue_q.io.deq.bits.laddr.acc_bank())

  // Reading from the Accumulator banks
  bank_ios.zipWithIndex.foreach { case (bio, i) =>
    if (use_shared_ext_mem) {
      io.ext_mem.get.acc(i) <> bio.ext_mem.get
    }

    // Hook this bank's accumulate port into the shared adder pipeline
    acc_adders.io.in_sel(i) := bio.adder.valid
    acc_adders.io.ina(i) := bio.adder.op1
    acc_adders.io.inb(i) := bio.adder.op2
    bio.adder.sum := acc_adders.io.out

    val ex_read_req = io.acc.read_req(i)
    val exread = ex_read_req.valid

    // TODO we tie the write dispatch queue's, and write issue queue's, ready and valid signals together here
    val dmawrite = write_dispatch_q.valid && write_norm_q.io.enq.ready &&
      !write_dispatch_q.bits.laddr.is_garbage() &&
      write_dispatch_q.bits.laddr.is_acc_addr && write_dispatch_q.bits.laddr.acc_bank() === i.U

    bio.read.req.valid := exread || dmawrite
    ex_read_req.ready := bio.read.req.ready

    // The ExecuteController gets priority when reading from accumulator banks
    when (exread) {
      bio.read.req.bits.addr := ex_read_req.bits.addr
      bio.read.req.bits.act := ex_read_req.bits.act
      bio.read.req.bits.igelu_qb := ex_read_req.bits.igelu_qb
      bio.read.req.bits.igelu_qc := ex_read_req.bits.igelu_qc
      bio.read.req.bits.iexp_qln2 := ex_read_req.bits.iexp_qln2
      bio.read.req.bits.iexp_qln2_inv := ex_read_req.bits.iexp_qln2_inv
      bio.read.req.bits.scale := ex_read_req.bits.scale
      bio.read.req.bits.full := false.B
      bio.read.req.bits.fromDMA := false.B
    }.elsewhen (dmawrite) {
      // DMA mvout: read the accumulator row so it can be normalized/scaled and written to memory
      bio.read.req.bits.addr := write_dispatch_q.bits.laddr.acc_row()
      bio.read.req.bits.full := write_dispatch_q.bits.laddr.read_full_acc_row
      bio.read.req.bits.act := write_dispatch_q.bits.acc_act
      bio.read.req.bits.igelu_qb := write_dispatch_q.bits.acc_igelu_qb.asTypeOf(bio.read.req.bits.igelu_qb)
      bio.read.req.bits.igelu_qc := write_dispatch_q.bits.acc_igelu_qc.asTypeOf(bio.read.req.bits.igelu_qc)
      bio.read.req.bits.iexp_qln2 := write_dispatch_q.bits.acc_iexp_qln2.asTypeOf(bio.read.req.bits.iexp_qln2)
      bio.read.req.bits.iexp_qln2_inv := write_dispatch_q.bits.acc_iexp_qln2_inv.asTypeOf(bio.read.req.bits.iexp_qln2_inv)
      bio.read.req.bits.scale := write_dispatch_q.bits.acc_scale.asTypeOf(bio.read.req.bits.scale)
      bio.read.req.bits.fromDMA := true.B

      when (bio.read.req.fire) {
        write_dispatch_q.ready := true.B
        write_norm_q.io.enq.valid := true.B

        io.dma.write.resp.valid := true.B
      }
    }.otherwise {
      bio.read.req.bits := DontCare
    }

    // Forward this bank's read response into the normalizer when the head of write_norm_q
    // targets this bank and every downstream stage can accept it in the same cycle
    bio.read.resp.ready := false.B
    when (write_norm_q.io.deq.valid &&
      acc_norm_unit_in.ready &&
      bio.read.resp.valid &&
      write_scale_q.io.enq.ready &&
      write_norm_q.io.deq.bits.laddr.is_acc_addr &&
      !write_norm_q.io.deq.bits.laddr.is_garbage() &&
      write_norm_q.io.deq.bits.laddr.acc_bank() === i.U)
    {
      write_norm_q.io.deq.ready := true.B
      acc_norm_unit_in.valid := true.B
      bio.read.resp.ready := true.B

      // Some normalizer commands don't write to main memory, so they don't need to be passed on to the scaling units
      write_scale_q.io.enq.valid := NormCmd.writes_to_main_memory(write_norm_q.io.deq.bits.laddr.norm_cmd)

      acc_norm_unit_in.bits.acc_read_resp := bio.read.resp.bits
      acc_norm_unit_in.bits.acc_read_resp.acc_bank_id := i.U
    }
  }

  // Writing to the accumulator banks
  bank_ios.zipWithIndex.foreach { case (bio, i) =>
    // Order of precedence during writes is ExecuteController, and then mvin_scale, and then mvin_scale_acc, and
    // then zero_writer
    val exwrite = io.acc.write(i).valid
    io.acc.write(i).ready := true.B
    assert(!(exwrite && !bio.write.ready), "Execute controller write to AccumulatorMem was skipped")

    // val from_mvin_scale = mvin_scale_out.valid && mvin_scale_out.bits.tag.is_acc
    val from_mvin_scale = mvin_scale_pixel_repeater.io.resp.valid && mvin_scale_pixel_repeater.io.resp.bits.tag.is_acc
    val from_mvin_scale_acc = mvin_scale_acc_out.valid && mvin_scale_acc_out.bits.tag.is_acc

    // val mvin_scale_laddr = mvin_scale_out.bits.tag.addr.asTypeOf(local_addr_t) + mvin_scale_out.bits.row
    val mvin_scale_laddr = mvin_scale_pixel_repeater.io.resp.bits.laddr
    val mvin_scale_acc_laddr = mvin_scale_acc_out.bits.tag.addr.asTypeOf(local_addr_t) + mvin_scale_acc_out.bits.row

    // mvin_scale takes precedence over mvin_scale_acc when selecting the target bank/row
    val dmaread_bank = Mux(from_mvin_scale, mvin_scale_laddr.acc_bank(),
      mvin_scale_acc_laddr.acc_bank())
    val dmaread_row = Mux(from_mvin_scale, mvin_scale_laddr.acc_row(), mvin_scale_acc_laddr.acc_row())

    // We need to make sure that we don't try to return a dma read resp from both mvin_scale and mvin_scale_acc
    // at the same time. mvin_scale always gets priority in this cases
    val spad_last = mvin_scale_pixel_repeater.io.resp.valid && mvin_scale_pixel_repeater.io.resp.bits.last && !mvin_scale_pixel_repeater.io.resp.bits.tag.is_acc

    val dmaread = (from_mvin_scale || from_mvin_scale_acc) &&
      dmaread_bank === i.U /* &&
      (mvin_scale_same.B || from_mvin_scale || !spad_dmaread_last) */

    // We need to make sure that we don't try to return a dma read resp from both zero_writer and either mvin_scale
    // or mvin_acc_scale at the same time. The scalers always get priority in those cases
    /* val zerowrite = zero_writer.io.resp.valid && zero_writer.io.resp.bits.laddr.is_acc_addr &&
      zero_writer.io.resp.bits.laddr.acc_bank() === i.U && */
    val zerowrite = zero_writer_pixel_repeater.io.resp.valid && zero_writer_pixel_repeater.io.resp.bits.laddr.is_acc_addr &&
      zero_writer_pixel_repeater.io.resp.bits.laddr.acc_bank() === i.U &&
      // !((mvin_scale_out.valid && mvin_scale_out.bits.last) || (mvin_scale_acc_out.valid && mvin_scale_acc_out.bits.last))
      !((mvin_scale_pixel_repeater.io.resp.valid && mvin_scale_pixel_repeater.io.resp.bits.last) || (mvin_scale_acc_out.valid && mvin_scale_acc_out.bits.last))

    // Tracks back-to-back accumulating writes landing in the same sub-bank of a single-ported
    // accumulator; such writes are stalled for a cycle — presumably to avoid a sub-bank port
    // conflict with the read-modify-write — TODO confirm
    val consecutive_write_block = RegInit(false.B)
    if (acc_singleported) {
      val consecutive_write_sub_bank = RegInit(0.U((1 max log2Ceil(acc_sub_banks)).W))
      when (bio.write.fire && bio.write.bits.acc &&
        (bio.write.bits.addr(log2Ceil(acc_sub_banks)-1,0) === consecutive_write_sub_bank)) {
        consecutive_write_block := true.B
      } .elsewhen (bio.write.fire && bio.write.bits.acc) {
        consecutive_write_block := false.B
        consecutive_write_sub_bank := bio.write.bits.addr(log2Ceil(acc_sub_banks)-1,0)
      } .otherwise {
        consecutive_write_block := false.B
      }
    }

    bio.write.valid := false.B
    // bio.write.bits.acc := MuxCase(zero_writer.io.resp.bits.laddr.accumulate,
    bio.write.bits.acc := MuxCase(zero_writer_pixel_repeater.io.resp.bits.laddr.accumulate,
      Seq(exwrite -> io.acc.write(i).bits.acc,
        // from_mvin_scale -> mvin_scale_out.bits.tag.accumulate,
        from_mvin_scale -> mvin_scale_pixel_repeater.io.resp.bits.tag.accumulate,
        from_mvin_scale_acc -> mvin_scale_acc_out.bits.tag.accumulate))
    // bio.write.bits.addr := MuxCase(zero_writer.io.resp.bits.laddr.acc_row(),
    bio.write.bits.addr := MuxCase(zero_writer_pixel_repeater.io.resp.bits.laddr.acc_row(),
      Seq(exwrite -> io.acc.write(i).bits.addr,
        (from_mvin_scale || from_mvin_scale_acc) -> dmaread_row))

    when (exwrite) {
      bio.write.valid := true.B
      bio.write.bits.data := io.acc.write(i).bits.data
      bio.write.bits.mask := io.acc.write(i).bits.mask
    }.elsewhen (dmaread && !spad_last && !consecutive_write_block) {
      bio.write.valid := true.B
      bio.write.bits.data := Mux(from_mvin_scale,
        // VecInit(mvin_scale_out.bits.out.map(e => e.withWidthOf(accType))).asTypeOf(acc_row_t),
        VecInit(mvin_scale_pixel_repeater.io.resp.bits.out.map(e => e.withWidthOf(accType))).asTypeOf(acc_row_t),
        mvin_scale_acc_out.bits.out.asTypeOf(acc_row_t))
      bio.write.bits.mask :=
        Mux(from_mvin_scale,
          {
            // Expand the input-width byte mask to cover full accumulator-width elements
            val n = accType.getWidth / inputType.getWidth
            // val mask = mvin_scale_out.bits.tag.mask take ((spad_w / (aligned_to * 8)) max 1)
            val mask = mvin_scale_pixel_repeater.io.resp.bits.mask take ((spad_w / (aligned_to * 8)) max 1)
            val expanded = VecInit(mask.flatMap(e => Seq.fill(n)(e)))
            expanded
          },
          mvin_scale_acc_out.bits.tag.mask)

      when(from_mvin_scale) {
        mvin_scale_pixel_repeater.io.resp.ready := bio.write.ready
      }.otherwise {
        mvin_scale_acc_out.ready := bio.write.ready
      }
    }.elsewhen (zerowrite && !spad_last && !consecutive_write_block) {
      bio.write.valid := true.B
      bio.write.bits.data := 0.U.asTypeOf(acc_row_t)
      bio.write.bits.mask := {
        // Expand the input-width byte mask to cover full accumulator-width elements
        val n = accType.getWidth / inputType.getWidth
        val mask = zero_writer_pixel_repeater.io.resp.bits.mask
        val expanded = VecInit(mask.flatMap(e => Seq.fill(n)(e)))
        expanded
      }

      zero_writer_pixel_repeater.io.resp.ready := bio.write.ready
    }.otherwise {
      bio.write.bits.data := DontCare
      bio.write.bits.mask := DontCare
    }
  }

  banks
}
// Counter connection: expose the DMA reader/writer event counters; all other
// counter fields are deliberately left unconnected
io.counter := DontCare
io.counter.collect(reader.module.io.counter)
io.counter.collect(writer.module.io.counter)
}
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
  import scala.math.Ordered.orderingToOrdered

  /** Orders [[HalfEdge]]s lexicographically: first by `serial`, then by `index`. */
  def compare(that: HalfEdge): Int = (serial, index).compare((that.serial, that.index))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
  // Hardware data of this dangle. Throws if `dataOpt` is None, which happens
  // when this Dangle belongs to a cloned module — callers must know it exists.
  def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
  * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
  * implement the protocol.
  *
  * @tparam EI
  *   edge parameter type of inward (input) connections.
  * @tparam EO
  *   edge parameter type of outward (output) connections.
  * @param in
  *   edge parameters for each input connection.
  * @param out
  *   edge parameters for each output connection.
  */
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called.
  * Defaults to `true` (monitors enabled).
  */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
  *
  * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
  * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
  * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
  * [[LazyModule]].
  *
  * Defaults to `false` (no flipping).
  */
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extends from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extends from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: source is process by a function and generate pass to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] whose inward and outward nodes are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding: the inward binding info followed by the outward binding info,
  * each terminated with a newline.
  */
def bindingInfo: String = Seq(iBindingInfo, oBindingInfo).mkString("", "\n", "\n")
/** Debug info of ports connecting: one line per direction listing the count and names of
  * the connected outward then inward ports.
  */
def connectedPortsInfo: String = {
  val outwardNames = oPorts.map(_._2.name).mkString(",")
  val inwardNames = iPorts.map(_._2.name).mkString(",")
  s"${oPorts.size} outward ports connected: [$outwardNames]\n" +
    s"${iPorts.size} inward ports connected: [$inwardNames]\n"
}
/** Debug info of parameters propagations: one line per parameter sequence with its size and contents. */
def parametersInfo: String = {
  // (label, parameter sequence) pairs, rendered in the same order as before
  val sections = Seq(
    ("downstream outward parameters", doParams),
    ("upstream outward parameters", uoParams),
    ("downstream inward parameters", diParams),
    ("upstream inward parameters", uiParams)
  )
  sections
    .map { case (label, params) => s"${params.size} $label: [${params.mkString(",")}]" }
    .mkString("", "\n", "\n")
}
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
  * [[MixedNode.iPortMapping]].
  *
  * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
  * stars and outward stars.
  *
  * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
  * of node.
  *
  * @param iKnown
  *   Number of known-size ([[BIND_ONCE]]) input bindings.
  * @param oKnown
  *   Number of known-size ([[BIND_ONCE]]) output bindings.
  * @param iStar
  *   Number of unknown size ([[BIND_STAR]]) input bindings.
  * @param oStar
  *   Number of unknown size ([[BIND_STAR]]) output bindings.
  * @return
  *   A Tuple of the resolved number of input and output connections.
  */
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)

/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
  * ports.
  *
  * @param n
  *   The size of the output sequence to generate.
  * @param p
  *   Sequence of downward-flowing input parameters of this node.
  * @return
  *   An `n`-sized sequence of downward-flowing output edge parameters.
  */
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]

/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
  *
  * @param n
  *   Size of the output sequence.
  * @param p
  *   Upward-flowing output edge parameters.
  * @return
  *   An `n`-sized sequence of upward-flowing input edge parameters.
  */
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
  *   The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
  *   [[BIND_STAR]].
  */
// `_._3` is the NodeBinding discriminator of the (index, node, binding, params, sourceInfo) tuple
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)

/** @return
  *   The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
  *   output bindings bound with [[BIND_STAR]].
  */
protected[diplomacy] lazy val sourceCard: Int =
  iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node, outward bindings first. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] = {
  val outwardFlexes = oBindings.collect { case (_, n, BIND_FLEX, _, _) => n }
  val inwardFlexes = iBindings.collect { case (_, n, BIND_FLEX, _, _) => n }
  outwardFlexes ++ inwardFlexes
}
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
  * greedily taking up the remaining connections.
  *
  * @return
  *   A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
  *   value is not relevant.
  */
protected[diplomacy] lazy val flexOffset: Int = {
  /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
    * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
    * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
    * each node in the current set and decide whether they should be added to the set or not.
    *
    * @return
    *   the mapping of [[BaseNode]] indexed by their serial numbers.
    */
  def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
    if (visited.contains(v.serial) || !v.flexibleArityDirection) {
      visited
    } else {
      v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
    }
  }

  /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
    *
    * @example
    *   {{{
    *   a :*=* b :*=* c
    *   d :*=* b
    *   e :*=* f
    *   }}}
    *
    * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
    */
  val flexSet = DFS(this, Map()).values

  /** The total number of :*= operators where we're on the left. */
  val allSink = flexSet.map(_.sinkCard).sum

  /** The total number of :=* operators used when we're on the right. */
  val allSource = flexSet.map(_.sourceCard).sum

  // The connected component must be uniformly sink-directed or source-directed
  require(
    allSink == 0 || allSource == 0,
    s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
  )
  allSink - allSource
}
/** Resolve which side's flex offset governs the edge between `this` and `n`.
  *
  * @return
  *   A value >= 0 if it is sink cardinality, a negative value for source cardinality; 0 when neither
  *   endpoint is flexible.
  */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
  // This node's own flex resolution takes precedence over its neighbor's
  val selfDirection = if (flexibleArityDirection) Some(flexOffset) else None
  selfDirection.getOrElse(if (n.flexibleArityDirection) n.flexOffset else 0)
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
  * resolution.
  *
  * Evaluates at most one of the by-name arguments `l` and `r`.
  */
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int =
  math.signum(edgeArityDirection(n)) match {
    case -1 => l // source-cardinality direction
    case 1  => r // sink-cardinality direction
    case _  => 1 // no flex direction applies
  }
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators.
  * Set on the first evaluation of the port-mapping lazy val; re-entry while still set
  * indicates a star-resolution cycle and raises a [[StarCycleException]].
  */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star"
  * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also
  * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct
  * edge parameters and later build up correct bundle connections.
  *
  * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
  * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
  * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
  * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
  */
protected[diplomacy] lazy val (
  oPortMapping: Seq[(Int, Int)],
  iPortMapping: Seq[(Int, Int)],
  oStar: Int,
  iStar: Int
) = {
  try {
    if (starCycleGuard) throw StarCycleException()
    starCycleGuard = true
    // For a given node N...
    //   Number of foo :=* N
    // + Number of bar :=* foo :*=* N
    val oStars = oBindings.count { case (_, n, b, _, _) =>
      b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
    }
    //   Number of N :*= foo
    // + Number of N :*=* foo :*= bar
    val iStars = iBindings.count { case (_, n, b, _, _) =>
      b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
    }
    //   1 for foo := N
    // + bar.iStar for bar :*= foo :*=* N
    // + foo.iStar for foo :*= N
    // + 0 for foo :=* N
    val oKnown = oBindings.map { case (_, n, b, _, _) =>
      b match {
        case BIND_ONCE  => 1
        case BIND_FLEX  => edgeAritySelect(n, 0, n.iStar)
        case BIND_QUERY => n.iStar
        case BIND_STAR  => 0
      }
    }.sum
    //   1 for N := foo
    // + bar.oStar for N :*=* foo :=* bar
    // + foo.oStar for N :=* foo
    // + 0 for N :*= foo
    val iKnown = iBindings.map { case (_, n, b, _, _) =>
      b match {
        case BIND_ONCE  => 1
        case BIND_FLEX  => edgeAritySelect(n, n.oStar, 0)
        case BIND_QUERY => n.oStar
        case BIND_STAR  => 0
      }
    }.sum
    // Resolve star depends on the node subclass to implement the algorithm for this.
    val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
    // Cumulative list of resolved outward binding range starting points
    val oSum = oBindings.map { case (_, n, b, _, _) =>
      b match {
        case BIND_ONCE  => 1
        case BIND_FLEX  => edgeAritySelect(n, oStar, n.iStar)
        case BIND_QUERY => n.iStar
        case BIND_STAR  => oStar
      }
    }.scanLeft(0)(_ + _)
    // Cumulative list of resolved inward binding range starting points
    val iSum = iBindings.map { case (_, n, b, _, _) =>
      b match {
        case BIND_ONCE  => 1
        case BIND_FLEX  => edgeAritySelect(n, n.oStar, iStar)
        case BIND_QUERY => n.oStar
        case BIND_STAR  => iStar
      }
    }.scanLeft(0)(_ + _)
    // Create ranges for each binding based on the running sums and return
    // those along with resolved values for the star operations.
    // Zipping init with tail pairs each running sum with its successor, i.e. [start, end) ranges.
    (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
  } catch {
    case c: StarCycleException => throw c.copy(loop = context +: c.loop)
  }
}
/** Sequence of inward ports.
  *
  * This should be called after all star bindings are resolved.
  *
  * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
  * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
  * connection was made in the source code.
  */
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
  oBindings.flatMap { case (bindingIndex, node, _, params, sourceInfo) =>
    // for each binding operator in this node, look at what it connects to
    val (start, end) = node.iPortMapping(bindingIndex)
    (start until end).map(portIndex => (portIndex, node, params, sourceInfo))
  }
/** Sequence of outward ports.
  *
  * This should be called after all star bindings are resolved.
  *
  * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
  * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
  * was made in the source code.
  */
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
  iBindings.flatMap { case (bindingIndex, node, _, params, sourceInfo) =>
    // query this port index range of this node in the other side of node.
    val (start, end) = node.oPortMapping(bindingIndex)
    (start until end).map(portIndex => (portIndex, node, params, sourceInfo))
  }
// Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
/** Follows outward port forwarding through ephemeral nodes until reaching a concrete inward port. */
@scala.annotation.tailrec
private def oTrace(
  tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
  case (i, n, p, s) => n.iForward(i) match {
    case None => (i, n, p, s) // n does not forward: the port resolves here
    case Some((j, m)) => oTrace((j, m, p, s)) // n forwards to port j of node m: keep following
  }
}

/** Follows inward port forwarding through ephemeral nodes until reaching a concrete outward port. */
@scala.annotation.tailrec
private def iTrace(
  tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
  case (i, n, p, s) => n.oForward(i) match {
    case None => (i, n, p, s)
    case Some((j, m)) => iTrace((j, m, p, s))
  }
}
  /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
   *
   * Each Port is a tuple of:
   *   - Numeric index of this binding in the [[InwardNode]] on the other end.
   *   - [[InwardNode]] on the other end of this binding.
   *   - A view of [[Parameters]] where the binding occurred.
   *   - [[SourceInfo]] for source-level error reporting.
   */
  lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
  /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
   *
   * Each Port is a tuple of:
   *   - numeric index of this binding in [[OutwardNode]] on the other end.
   *   - [[OutwardNode]] on the other end of this binding.
   *   - a view of [[Parameters]] where the binding occurred.
   *   - [[SourceInfo]] for source-level error reporting.
   */
  lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
  // Re-entrancy guard: doParams is a lazy val, so a parameter-negotiation cycle in the
  // graph would otherwise manifest as an infinite recursion / deadlock instead of an error.
  private var oParamsCycleGuard = false
  /** Downward-flowing inward parameters: for each input port, the parameters produced by the node on the other end. */
  protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
  /** Downward-flowing outward parameters of this node, one per output port, mixed-in via `outer.mixO`. */
  protected[diplomacy] lazy val doParams: Seq[DO] = {
    try {
      if (oParamsCycleGuard) throw DownwardCycleException()
      oParamsCycleGuard = true
      val o = mapParamsD(oPorts.size, diParams)
      require(
        o.size == oPorts.size,
        s"""Diplomacy has detected a problem with your graph:
           |At the following node, the number of outward ports should equal the number of produced outward parameters.
           |$context
           |$connectedPortsInfo
           |Downstreamed inward parameters: [${diParams.mkString(",")}]
           |Produced outward parameters: [${o.mkString(",")}]
           |""".stripMargin
      )
      o.map(outer.mixO(_, this))
    } catch {
      // Prepend this node's context so the reported loop shows the full cycle path.
      case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
    }
  }
  // Re-entrancy guard for the upward (inward-parameter) direction; see oParamsCycleGuard.
  private var iParamsCycleGuard = false
  /** Upward-flowing outward parameters: for each output port, the parameters produced by the node on the other end. */
  protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
  /** Upward-flowing inward parameters of this node, one per input port, mixed-in via `inner.mixI`. */
  protected[diplomacy] lazy val uiParams: Seq[UI] = {
    try {
      if (iParamsCycleGuard) throw UpwardCycleException()
      iParamsCycleGuard = true
      val i = mapParamsU(iPorts.size, uoParams)
      require(
        i.size == iPorts.size,
        s"""Diplomacy has detected a problem with your graph:
           |At the following node, the number of inward ports should equal the number of produced inward parameters.
           |$context
           |$connectedPortsInfo
           |Upstreamed outward parameters: [${uoParams.mkString(",")}]
           |Produced inward parameters: [${i.mkString(",")}]
           |""".stripMargin
      )
      i.map(inner.mixI(_, this))
    } catch {
      // Prepend this node's context so the reported loop shows the full cycle path.
      case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
    }
  }
  /** Outward edge parameters: negotiated from this node's outward params and the far node's inward params. */
  protected[diplomacy] lazy val edgesOut: Seq[EO] =
    (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
  /** Inward edge parameters: negotiated from the far node's outward params and this node's inward params. */
  protected[diplomacy] lazy val edgesIn: Seq[EI] =
    (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
  /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
   *
   * If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
   */
  lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
  /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
  protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
    val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
    // In the future, we should add an option to decide whether allowing unconnected in the LazyModule
    x := DontCare
    x
  }
  /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
  protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
    val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
    // In the future, we should add an option to decide whether allowing unconnected in the LazyModule
    x := DontCare
    x
  }
  /** [[Dangle]]s for every output port, with no hardware data attached yet (used when cloning). */
  private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
    Dangle(
      source = HalfEdge(serial, i),    // this node's output port i is the source ...
      sink = HalfEdge(n.serial, j),    // ... feeding input port j of the far node
      flipped = false,
      name = wirePrefix + "out",
      dataOpt = None
    )
  }
  /** [[Dangle]]s for every input port, with no hardware data attached yet (used when cloning). */
  private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
    Dangle(
      source = HalfEdge(n.serial, j),
      sink = HalfEdge(serial, i),
      flipped = true,
      name = wirePrefix + "in",
      dataOpt = None
    )
  }
  /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
  protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
    d.copy(dataOpt = Some(bundleOut(i)))
  }
  /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
  protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
    d.copy(dataOpt = Some(bundleIn(i)))
  }
  // Set true by instantiate(); guards out/in below against use before hardware elaboration begins.
  private[diplomacy] var instantiated = false
  /** Gather Bundle and edge parameters of outward ports.
   *
   * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
   * [[LazyModuleImp]] code or after its instantiation has completed.
   */
  def out: Seq[(BO, EO)] = {
    require(
      instantiated,
      s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
    )
    bundleOut.zip(edgesOut)
  }
  /** Gather Bundle and edge parameters of inward ports.
   *
   * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
   * [[LazyModuleImp]] code or after its instantiation has completed.
   */
  def in: Seq[(BI, EI)] = {
    require(
      instantiated,
      s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
    )
    bundleIn.zip(edgesIn)
  }
  /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
   * instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
   */
  protected[diplomacy] def instantiate(): Seq[Dangle] = {
    instantiated = true
    if (!circuitIdentity) {
      // Attach a protocol monitor to each input edge whose Parameters view enables monitoring.
      (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
    }
    danglesOut ++ danglesIn
  }
  /** Dangles for a cloned node: topology only, no hardware data attached. */
  protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
  /** Connects the outward part of a node with the inward part of this node.
   *
   * Records the binding on both ends: this node (`x`) pushes an inward binding, and the far
   * node (`y`) pushes the matching outward binding. Note the star/query reversal below:
   * what is a STAR from this side's perspective is a QUERY from the other side, and vice versa.
   */
  protected[diplomacy] def bind(
    h: OutwardNode[DI, UI, BI],
    binding: NodeBinding
  )(
    implicit p: Parameters,
    sourceInfo: SourceInfo
  ): Unit = {
    val x = this // x := y
    val y = h
    sourceLine(sourceInfo, " at ", "")
    val i = x.iPushed
    val o = y.oPushed
    y.oPush(
      i,
      x,
      binding match {
        case BIND_ONCE => BIND_ONCE
        case BIND_FLEX => BIND_FLEX
        case BIND_STAR => BIND_QUERY
        case BIND_QUERY => BIND_STAR
      }
    )
    x.iPush(o, y, binding)
  }
  /* Metadata for printing the node graph. */
  def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
    val re = inner.render(e)
    // RenderFlipped in the Parameters view toggles the rendered edge direction.
    (n, re.copy(flipped = re.flipped != p(RenderFlipped)))
  }
  /** Metadata for printing the node graph */
  def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File NormCmd.scala:
package gemmini
import chisel3._
import chisel3.util._
/** Commands accepted by the [[Normalizer]] unit. */
object NormCmd extends ChiselEnum {
  val RESET, SUM, MEAN, VARIANCE, INV_STDDEV, MAX, SUM_EXP, INV_SUM_EXP = Value

  /** Whether this command's result is written back out to main memory.
   *  Only RESET does; all other commands accumulate internal statistics.
   */
  def writes_to_main_memory(cmd: Type): Bool = {
    cmd === RESET
  }

  /** Map a command to its running-accumulation variant: the final reduction
   *  commands (MEAN, INV_STDDEV, INV_SUM_EXP) become the corresponding
   *  accumulation commands (SUM, VARIANCE, SUM_EXP). All other commands
   *  (including MAX) already are their own non-reset version, so the
   *  MuxCase default returns `cmd` unchanged.
   */
  def non_reset_version(cmd: Type): Type = {
    MuxCase(cmd, Seq(
      (cmd === MEAN) -> SUM,
      (cmd === INV_STDDEV) -> VARIANCE,
      (cmd === INV_SUM_EXP) -> SUM_EXP
    ))
  }
}
File Pipeline.scala:
package gemmini
import chisel3._
import chisel3.util._
/** A stall-capable `latency`-stage pipeline for Decoupled payloads.
 *
 *  `comb` supplies `latency + 1` combinational functions: `comb.head` is applied
 *  as the payload enters stage 0, each subsequent function as the payload moves
 *  between stages, and `comb.last` on the way out. With `latency == 0` the
 *  pipeline degenerates to pure combinational pass-through.
 *
 *  NOTE: the `when` blocks below rely on Chisel last-connect semantics; their
 *  textual order is significant (e.g. a stage becoming invalid can be overridden
 *  by the same stage being refilled later in the file).
 */
class Pipeline[T <: Data] (gen: T, latency: Int)(comb: Seq[T => T] = Seq.fill(latency+1)((x: T) => x)) extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(gen))
    val out = Decoupled(gen)
    val busy = Output(Bool())
  })
  require(comb.size == latency+1, "length of combinational is incorrect")
  if (latency == 0) {
    // Zero-latency: purely combinational connection; no registers at all.
    io.in.ready := io.out.ready
    io.out.valid := io.in.valid
    io.out.bits := comb.head(io.in.bits)
    io.busy := io.in.valid
  } else {
    val stages = Reg(Vec(latency, gen))
    val valids = RegInit(VecInit(Seq.fill(latency)(false.B)))
    val stalling = VecInit(Seq.fill(latency)(false.B))
    io.busy := io.in.valid || valids.reduce(_||_)
    // Stall signals
    // A stage stalls if it holds valid data and the stage ahead of it stalls
    // (or, for the last stage, if the consumer is not ready). Stalls thus
    // propagate backwards combinationally from the output.
    io.in.ready := !stalling.head
    stalling.last := valids.last && !io.out.ready
    (stalling.init, stalling.tail, valids.init).zipped.foreach { case (s1, s2, v1) =>
      s1 := v1 && s2
    }
    // Valid signals
    // When the pipeline stage ahead of you isn't stalling, then make yourself invalid
    io.out.valid := valids.last
    when(io.out.ready) {
      valids.last := false.B
    }
    (valids.init, stalling.tail).zipped.foreach { case (v1, s2) =>
      when(!s2) {
        v1 := false.B
      }
    }
    // When the pipeline stage behind you is valid then become true
    when(io.in.fire) {
      valids.head := true.B
    }
    (valids.tail, valids.init).zipped.foreach { case (v2, v1) =>
      when(v1) {
        v2 := true.B
      }
    }
    // Stages
    // Data advances whenever the stage ahead is not stalling; the per-stage
    // combinational function is applied as the data moves forward.
    when(io.in.fire) {
      stages.head := comb.head(io.in.bits)
    }
    io.out.bits := comb.last(stages.last)
    ((stages.tail zip stages.init) zip (stalling.tail zip comb.tail.init)).foreach { case ((st2, st1), (s2, c1)) =>
      when(!s2) {
        st2 := c1(st1)
      }
    }
  }
}
object Pipeline {
  /** Wrap `in` in a `latency`-stage pipeline, applying the per-stage combinational
   *  functions `comb` (must have `latency + 1` elements), and return its output.
   */
  def apply[T <: Data](in: ReadyValidIO[T], latency: Int, comb: Seq[T => T]): DecoupledIO[T] = {
    val pipe = Module(new Pipeline(in.bits.cloneType, latency)(comb))
    pipe.io.in <> in
    pipe.io.out
  }

  /** Wrap `in` in a pure `latency`-stage delay pipeline (identity combinational logic). */
  def apply[T <: Data](in: ReadyValidIO[T], latency: Int): DecoupledIO[T] = {
    val pipe = Module(new Pipeline(in.bits.cloneType, latency)())
    pipe.io.in <> in
    pipe.io.out
  }
}
File Normalizer.scala:
package gemmini
import chisel3._
import chisel3.util._
import gemmini.AccumulatorScale.iexp
import hardfloat.{DivSqrtRecFN_small, INToRecFN, MulRecFN, consts, fNFromRecFN, recFNFromFN}
/** Request into the [[Normalizer]]: one accumulator-read response plus the
 *  normalization command to run on it.
 */
class NormalizedInput[T <: Data: Arithmetic, U <: Data](max_len: Int, num_stats: Int, fullDataType: Vec[Vec[T]],
                                                        scale_t: U) extends Bundle {
  val acc_read_resp = new AccumulatorReadResp[T,U](fullDataType, scale_t)
  val len = UInt(log2Up(max_len + 1).W)      // number of valid elements in this row
  val stats_id = UInt(log2Up(num_stats).W)   // which statistics buffer this row belongs to
  val cmd = NormCmd()
}
/** Response from the [[Normalizer]]: the original read response passed through,
 *  plus the computed normalization statistics.
 */
class NormalizedOutput[T <: Data: Arithmetic, U <: Data](fullDataType: Vec[Vec[T]], scale_t: U) extends Bundle {
  val acc_read_resp = new AccumulatorReadResp[T,U](fullDataType, scale_t)
  val mean = fullDataType.head.head.cloneType
  val max = fullDataType.head.head.cloneType
  val inv_stddev = scale_t.cloneType
  val inv_sum_exp = scale_t.cloneType
}
/** Constants consumed by the integer-exponential approximation (`iexp`):
 *  the quantized polynomial coefficients qb/qc and quantized ln2 and 1/ln2.
 */
class IExpConst[T <: Data](acc_t: T) extends Bundle {
  val qb = acc_t.cloneType
  val qc = acc_t.cloneType
  val qln2 = acc_t.cloneType
  val qln2_inv = acc_t.cloneType
}
/** Parallel reduction lanes feeding the Normalizer's running sum.
 *
 *  Depending on the command, each input group is reduced as a plain sum, a
 *  squared-error sum (for variance), or a sum of exponentials (for softmax).
 *  The scalar result is delayed by `latency` cycles through an internal
 *  always-ready [[Pipeline]].
 */
class AccumulationLanes[T <: Data](num_stats: Int, acc_t: T, n_lanes: Int, latency: Int)(implicit ev: Arithmetic[T])
  extends Module {
  // Each lane computes a sum, or an error-squared sum
  import ev._
  class LaneOutput extends Bundle {
    val result = acc_t.cloneType
    val stats_id = UInt(log2Up(num_stats).W)
  }
  val io = IO(new Bundle {
    val ins = Flipped(Valid(new Bundle {
      val len = UInt(log2Up(n_lanes+1).W)   // number of valid lanes in this group
      val data = Vec(n_lanes, acc_t)
      val mean = acc_t.cloneType
      val max = acc_t.cloneType
      val iexp_const = new IExpConst(acc_t)
      val cmd = NormCmd()
      val stats_id = UInt(log2Up(num_stats).W)
    }))
    val out = Valid(new LaneOutput)
    val busy = Output(Bool())
  })
  val cmd = io.ins.bits.cmd
  val mean = io.ins.bits.mean
  val iexp_c = io.ins.bits.iexp_const
  // Per-lane term: (d - mean)^2 for variance commands, iexp(d - max) for
  // softmax commands, d itself otherwise; lanes beyond `len` contribute zero.
  val data = io.ins.bits.data.zipWithIndex.map { case (d, i) =>
    val iexp_result = iexp(d - io.ins.bits.max, iexp_c.qln2, iexp_c.qln2_inv, iexp_c.qb, iexp_c.qc)
    Mux(i.U < io.ins.bits.len,
      MuxCase(d, Seq(
        (cmd === NormCmd.VARIANCE || cmd === NormCmd.INV_STDDEV) -> (d-mean)*(d-mean),
        (cmd === NormCmd.SUM_EXP || cmd === NormCmd.INV_SUM_EXP) ->
          iexp_result //iexp(d - io.ins.bits.max, iexp_c.qln2, iexp_c.qln2_inv, iexp_c.qb, iexp_c.qc)
      )).withWidthOf(acc_t),
      d.zero)
  }
  val result = data.reduce(_ + _)
  val pipe = Module(new Pipeline[LaneOutput](new LaneOutput, latency)())
  pipe.io.in.valid := io.ins.valid
  // io.ins.ready := pipe.io.in.ready
  pipe.io.in.bits.result := result
  pipe.io.in.bits.stats_id := io.ins.bits.stats_id
  io.out.valid := pipe.io.out.valid
  pipe.io.out.ready := true.B  // output is Valid-only; downstream must always accept
  // pipe.io.out.ready := io.out.ready
  io.out.bits := pipe.io.out.bits
  io.busy := pipe.io.busy
}
/** Parallel max-reduction lanes for the Normalizer (used before SUM_EXP so the
 *  exponentials can be computed relative to the row maximum). The maximum of
 *  the valid lanes is computed with a balanced comparison tree and delayed by
 *  `latency` cycles through an internal always-ready [[Pipeline]].
 */
class MaxLanes[T <: Data](num_stats: Int, acc_t: T, n_lanes: Int, latency: Int)(implicit ev: Arithmetic[T])
  extends Module {
  // Each lane computes a sum, or an error-squared sum
  import ev._
  class LaneOutput extends Bundle {
    val result = acc_t.cloneType
    val stats_id = UInt(log2Up(num_stats).W)
  }
  val io = IO(new Bundle {
    val ins = Flipped(Valid(new Bundle {
      val len = UInt(log2Up(n_lanes + 1).W)
      val data = Vec(n_lanes, acc_t)
      val stats_id = UInt(log2Up(num_stats).W)
    }))
    val out = Valid(new LaneOutput)
    val busy = Output(Bool())
  })
  // Lanes beyond `len` are forced to the type's minimum so they never win the max.
  val data = io.ins.bits.data.zipWithIndex.map { case (d, i) =>
    Mux(i.U < io.ins.bits.len, d.withWidthOf(acc_t), d.minimum)
  }
  /** Balanced-tree maximum over a non-empty sequence (log-depth comparator tree). */
  def treeMax(x: Seq[T]): T = {
    if (x.length == 1) {
      x.head
    } else {
      val a = treeMax(x.slice(0, x.length / 2)) // ayy slice
      val b = treeMax(x.slice(x.length / 2, x.length))
      Mux(a > b, a, b)
    }
  }
  val result = treeMax(data)
  val pipe = Module(new Pipeline[LaneOutput](new LaneOutput, latency)())
  pipe.io.in.valid := io.ins.valid
  // io.ins.ready := pipe.io.in.ready
  pipe.io.in.bits.result := result
  pipe.io.in.bits.stats_id := io.ins.bits.stats_id
  io.out.valid := pipe.io.out.valid
  pipe.io.out.ready := true.B  // output is Valid-only; downstream must always accept
  // pipe.io.out.ready := io.out.ready
  io.out.bits := pipe.io.out.bits
  io.busy := pipe.io.busy
}
/** Iterative integer square root (binary non-restoring style): consumes two
 *  input bits per cycle, produces a floor(sqrt) result of N = ceil(width/2)
 *  bits after N cycles. Decoupled on both sides; busy while iterating.
 *
 *  NOTE(review): the shift/concat indexing assumes `width >= 2`; behavior for
 *  odd widths follows from the `x(width-1, width-2)` two-bit window — confirm
 *  intended alignment if instantiated with odd `width`.
 */
class IntSqrt(width: Int) extends Module {
  val N = (width + 1) >> 1
  val input = IO(Flipped(Decoupled(UInt(width.W))))
  val output = IO(Decoupled(UInt(N.W)))
  val x = Reg(UInt(width.W))        // remaining radicand bits, shifted left 2/cycle
  val a = Reg(UInt(width.W))        // partial remainder
  val t = Wire(UInt(width.W))       // trial subtraction: {a, next 2 bits} - {q, 01}
  val q = Reg(UInt(N.W))            // root accumulated so far, one bit per cycle
  val sign = Wire(UInt(1.W))        // sign of the trial subtraction (1 = underflow)
  val busy = RegInit(false.B)
  val resultValid = RegInit(false.B)
  val counter = Reg(UInt(log2Ceil(N).W))
  input.ready := ! busy
  output.valid := resultValid
  output.bits := DontCare
  t := Cat(a, x(width - 1, width - 2)) - Cat(q, 1.U(2.W))
  sign := t(width - 1)
  output.bits := q
  when(busy) {
    when (!resultValid) {
      counter := counter - 1.U
      x := Cat(x(width - 3, 0), 0.U(2.W))
      // If the trial subtraction underflowed, restore (keep shifted remainder);
      // otherwise accept the subtracted value. Next root bit is !sign.
      a := Mux(sign.asBool, Cat(a(width - 3, 0), x(width - 1, width - 2)), t)
      q := Cat(q(N - 2, 0), ~sign)
      when(counter === 0.U) {
        resultValid := true.B
      }
    }
    when(output.ready && resultValid) {
      busy := false.B
      resultValid := false.B
    }
  }.otherwise {
    // Idle: latch a new operand and start the N-cycle iteration.
    when(input.valid) {
      val inputBundle = input.deq()
      x := inputBundle
      a := 0.U
      q := 0.U
      busy := true.B
      counter := (N - 1).U
    }
  }
}
/** 2-cycle floating-point multiplier of two `scale_t` values, built on the
 *  hardfloat recoded-format multiplier.
 *
 *  NOTE(review): the match on `scale_t` only handles [[Float]]; elaboration
 *  fails with a MatchError for any other scale type.
 */
class MulPipe[T <: Data, U <: Data](scale_t: U)(implicit ev: Arithmetic[T])
  extends Module {
  val io = IO(new Bundle {
    val ins = Flipped(Decoupled(new Bundle {
      val x = scale_t.cloneType
      val y = scale_t.cloneType
    }))
    val out = Decoupled(scale_t.cloneType)
  })
  scale_t match {
    case Float(expWidth, sigWidth) =>
      // Convert both operands from IEEE to hardfloat's recoded format.
      val self_rec = recFNFromFN(expWidth, sigWidth, io.ins.bits.x.asUInt)
      val scale_rec = recFNFromFN(expWidth, sigWidth, io.ins.bits.y.asUInt)
      val mul = Module(new MulRecFN(expWidth, sigWidth))
      mul.io.roundingMode := consts.round_near_even
      mul.io.detectTininess := consts.tininess_afterRounding
      mul.io.a := self_rec
      mul.io.b := scale_rec
      val mul_result = fNFromRecFN(expWidth, sigWidth, mul.io.out).asTypeOf(scale_t)
      // The multiplier itself is combinational; the 2-stage pipeline supplies latency.
      val pipe = Module(new Pipeline(scale_t.cloneType, 2)())
      pipe.io.in.valid := io.ins.valid
      pipe.io.in.bits := mul_result
      io.ins.ready := pipe.io.in.ready
      // pipe.io.out.ready := io.out.ready
      // io.out.bits := pipe.io.out.bits
      // io.out.valid := pipe.io.out.valid
      io.out <> pipe.io.out
  }
}
class Normalizer[T <: Data, U <: Data](max_len: Int, num_reduce_lanes: Int, num_stats: Int, latency: Int,
fullDataType: Vec[Vec[T]], scale_t: U)
(implicit ev: Arithmetic[T]) extends Module {
import ev._
val acc_t = fullDataType.head.head.cloneType
val vec_size = fullDataType.flatten.size
val n_lanes = if (num_reduce_lanes < 0) vec_size else num_reduce_lanes
assert(isPow2(n_lanes))
val io = IO(new Bundle {
val in = Flipped(Decoupled(new NormalizedInput[T,U](max_len, num_stats, fullDataType, scale_t)))
val out = Decoupled(new NormalizedOutput(fullDataType, scale_t))
})
object State extends ChiselEnum {
// NOTE: We assume that "idle" and "output" are the first two states. We also assume that all the enums on the same
// line keep the order below
val idle, output = Value
val get_sum = Value
val get_mean, waiting_for_mean = Value
val get_variance, waiting_for_variance, get_stddev, waiting_for_stddev = Value
val get_inv_stddev, waiting_for_inv_stddev = Value
val get_scaled_inv_stddev, waiting_for_scaled_inv_stddev = Value
val get_max = Value
val get_inv_sum_exp, waiting_for_inv_sum_exp = Value
val get_scaled_inv_sum_exp, waiting_for_scaled_inv_sum_exp = Value
}
import State._
// Buffers for normalization stats
class Stats extends Bundle {
val req = new NormalizedInput[T,U](max_len, num_stats, fullDataType, scale_t)
val state = State()
// Running state
val sum = acc_t.cloneType
val count = UInt(16.W) // TODO magic number
val running_max = acc_t.cloneType
val max = acc_t.cloneType
// Iterative state
val mean = acc_t.cloneType
val inv_stddev = acc_t.cloneType
val inv_sum_exp = acc_t.cloneType
val elems_left = req.len.cloneType
def vec_grouped = VecInit(req.acc_read_resp.data.flatten.grouped(n_lanes).map(v => VecInit(v)).toSeq)
def vec_groups_left = elems_left / n_lanes.U + (elems_left % n_lanes.U =/= 0.U)
def cmd = req.cmd
def waiting_for_lanes_to_drain =
(cmd === NormCmd.MEAN && (state === get_sum || state === get_mean)) ||
(cmd === NormCmd.INV_STDDEV && (state === get_sum || state === get_variance)) ||
(cmd === NormCmd.MAX && (state === get_max)) ||
(cmd === NormCmd.INV_SUM_EXP && (state === get_sum))
}
val stats = Reg(Vec(num_stats, new Stats))
val done_with_functional_units = Wire(Vec(num_stats, Bool()))
val next_states = Wire(Vec(num_stats, State()))
(stats.map(_.state) zip next_states).foreach { case (s, ns) => s := ns }
// IO
val in_stats_id = io.in.bits.stats_id
io.in.ready := (stats(in_stats_id).state === idle || done_with_functional_units(in_stats_id)) &&
stats.map(!_.waiting_for_lanes_to_drain).reduce(_ && _)
val out_stats_id = MuxCase((num_stats-1).U,
stats.zipWithIndex.map { case (s,i) => (s.state === output) -> i.U }
)
io.out.valid := stats(out_stats_id).state === output
io.out.bits.acc_read_resp := stats(out_stats_id).req.acc_read_resp
io.out.bits.mean := stats(out_stats_id).mean
io.out.bits.max := stats(out_stats_id).max
io.out.bits.inv_stddev := stats(out_stats_id).inv_stddev.asTypeOf(scale_t)
io.out.bits.inv_sum_exp := stats(out_stats_id).inv_sum_exp.asTypeOf(scale_t)
// Lanes and functional units
val lanes = Module(new AccumulationLanes(num_stats, acc_t, n_lanes, latency))
val max_lanes = Module(new MaxLanes(num_stats, acc_t, n_lanes, latency)) // TODO: change latency?
{
// Lanes input
val in_lanes_stats_id = MuxCase((num_stats-1).U,
stats.zipWithIndex.map { case (s,i) => (s.state === get_sum) -> i.U }
)
val stat = stats(in_lanes_stats_id)
val len = Mux(stat.elems_left % n_lanes.U === 0.U, n_lanes.U, stat.elems_left % n_lanes.U)
lanes.io.ins.valid := stat.state === get_sum && stat.vec_groups_left > 0.U &&
!max_lanes.io.busy // TODO We should be able to start the accumulation lanes if the max-lanes are busy with a different stat-id
lanes.io.ins.bits.data := stat.vec_grouped(stat.vec_groups_left-1.U)
lanes.io.ins.bits.mean := stat.mean
lanes.io.ins.bits.max := stat.max
val iexp_const = Wire(new IExpConst(acc_t))
iexp_const.qln2 := io.in.bits.acc_read_resp.iexp_qln2.asTypeOf(iexp_const.qln2)
iexp_const.qln2_inv := io.in.bits.acc_read_resp.iexp_qln2_inv.asTypeOf(iexp_const.qln2_inv)
iexp_const.qb := io.in.bits.acc_read_resp.igelu_qb.asTypeOf(iexp_const.qb)
iexp_const.qc := io.in.bits.acc_read_resp.igelu_qc.asTypeOf(iexp_const.qc)
lanes.io.ins.bits.cmd := stat.cmd
lanes.io.ins.bits.len := len
lanes.io.ins.bits.stats_id := in_lanes_stats_id
lanes.io.ins.bits.iexp_const := iexp_const
when (lanes.io.ins.fire) {
stat.elems_left := stat.elems_left - len
}
}
{
// Lanes output
val out_lanes_stats_id = lanes.io.out.bits.stats_id
val stat = stats(out_lanes_stats_id)
when (lanes.io.out.fire) {
stat.sum := stat.sum + lanes.io.out.bits.result
}
}
{
// Max lanes input
val max_in_lanes_stats_id = MuxCase((num_stats-1).U,
stats.zipWithIndex.map { case (s,i) => (s.state === get_max) -> i.U }
)
val stat = stats(max_in_lanes_stats_id)
val len = Mux(stat.elems_left % n_lanes.U === 0.U, n_lanes.U, stat.elems_left % n_lanes.U)
max_lanes.io.ins.valid := stat.state === get_max && stat.vec_groups_left > 0.U
max_lanes.io.ins.bits.data := stat.vec_grouped(stat.vec_groups_left-1.U)
max_lanes.io.ins.bits.len := len
max_lanes.io.ins.bits.stats_id := max_in_lanes_stats_id
when (max_lanes.io.ins.fire) {
stat.elems_left := stat.elems_left - len
}
}
{
// Max lanes output
val max_out_lanes_stats_id = max_lanes.io.out.bits.stats_id
val stat = stats(max_out_lanes_stats_id)
when (max_lanes.io.out.fire) {
val new_max = Mux(max_lanes.io.out.bits.result > stat.running_max, max_lanes.io.out.bits.result, stat.running_max)
stat.running_max := new_max
stat.max := new_max
}
}
val sum_to_divide_id = MuxCase((num_stats-1).U,
stats.zipWithIndex.map { case (s,i) =>
(s.state === get_mean || s.state === get_variance) -> i.U }
)
val sum_to_divide = stats(sum_to_divide_id).sum
val (divider_in, divider_out) = sum_to_divide.divider(stats.head.count, 16).get
{
// Divider input
val stat = stats(sum_to_divide_id)
divider_in.valid := (stat.state === get_mean || stat.state === get_variance) && !lanes.io.busy
divider_in.bits := stat.count
}
{
// Divider output
val waiting_for_divide_id = MuxCase((num_stats-1).U,
stats.zipWithIndex.map { case (s,i) =>
(s.state === waiting_for_mean || s.state === waiting_for_variance) -> i.U }
)
val stat = stats(waiting_for_divide_id)
divider_out.ready := stat.state === waiting_for_mean || stat.state === waiting_for_variance
when(stat.state === waiting_for_mean) {
stat.mean := divider_out.bits
}.elsewhen(stat.state === waiting_for_variance) {
stat.inv_stddev := divider_out.bits
}
}
val variance_to_sqrt_id = MuxCase((num_stats-1).U,
stats.zipWithIndex.map { case (s,i) =>
(s.state === get_stddev) -> i.U }
)
val variance_to_sqrt = stats(variance_to_sqrt_id).inv_stddev
val sqrt_unit = Module(new IntSqrt(acc_t.getWidth))
val sqrt_in = sqrt_unit.input
val sqrt_out = sqrt_unit.output
// val (sqrt_in, sqrt_out) = variance_to_sqrt.sqrt.get
{
// Sqrt input
val stat = stats(variance_to_sqrt_id)
sqrt_in.bits := variance_to_sqrt.asUInt
sqrt_in.valid := stat.state === get_stddev
}
{
// Sqrt output
val waiting_for_sqrt_id = MuxCase((num_stats-1).U,
stats.zipWithIndex.map { case (s,i) =>
(s.state === waiting_for_stddev) -> i.U }
)
val stat = stats(waiting_for_sqrt_id)
sqrt_out.ready := stat.state === waiting_for_stddev
// TODO this fallback for stddev === 0 only works if acc_t is an SInt
assert(acc_t.isInstanceOf[SInt])
when (stat.state === waiting_for_stddev) {
stat.inv_stddev := Mux(sqrt_out.bits.asUInt === acc_t.zero.asUInt,
1.S(acc_t.getWidth.W).asTypeOf(acc_t),
sqrt_out.bits.asTypeOf(acc_t)
)
}
}
val stddev_to_inv_id = MuxCase((num_stats-1).U,
stats.zipWithIndex.map { case (s,i) =>
(s.state === get_inv_stddev) -> i.U }
)
val stddev_to_inv = stats(stddev_to_inv_id).inv_stddev
val (reciprocal_in, reciprocal_out) = stddev_to_inv.reciprocal(scale_t, 16).get
{
// Reciprocal input
val stat = stats(stddev_to_inv_id)
reciprocal_in.valid := stat.state === get_inv_stddev
reciprocal_in.bits := DontCare
}
{
// Reciprocal output
val waiting_for_reciprocal_id = MuxCase((num_stats-1).U,
stats.zipWithIndex.map { case (s,i) =>
(s.state === waiting_for_inv_stddev) -> i.U }
)
val stat = stats(waiting_for_reciprocal_id)
reciprocal_out.ready := stat.state === waiting_for_inv_stddev
when (stat.state === waiting_for_inv_stddev) {
stat.inv_stddev := reciprocal_out.bits.asTypeOf(stat.inv_stddev)
}
}
val inv_stddev_to_scale_id = MuxCase((num_stats-1).U,
stats.zipWithIndex.map { case (s,i) =>
(s.state === get_scaled_inv_stddev) -> i.U }
)
val inv_stddev_scale_mul_pipe = Module(new MulPipe(scale_t))
{
// Scale input
val stat = stats(inv_stddev_to_scale_id)
val ins = inv_stddev_scale_mul_pipe.io.ins
ins.bits.x := stats(inv_stddev_to_scale_id).inv_stddev.asTypeOf(scale_t)
ins.bits.y := stats(inv_stddev_to_scale_id).req.acc_read_resp.scale
ins.valid := stat.state === get_scaled_inv_stddev
}
{
// Scale output
val waiting_for_scale_id = MuxCase((num_stats-1).U,
stats.zipWithIndex.map { case (s,i) =>
(s.state === waiting_for_scaled_inv_stddev) -> i.U }
)
val stat = stats(waiting_for_scale_id)
val out = inv_stddev_scale_mul_pipe.io.out
out.ready := stat.state === waiting_for_scaled_inv_stddev
when (stat.state === waiting_for_scaled_inv_stddev) {
stat.inv_stddev := out.bits.asTypeOf(stat.inv_stddev)
}
}
val sum_exp_to_inv_id = MuxCase((num_stats-1).U,
stats.zipWithIndex.map { case (s,i) =>
(s.state === get_inv_sum_exp) -> i.U }
)
val sum_exp_to_inv = stats(sum_exp_to_inv_id).sum
val exp_divider_in = Wire(Decoupled(UInt(0.W)))
val exp_divider_out = Wire(Decoupled(scale_t.cloneType))
scale_t match {
case Float(expWidth, sigWidth) =>
exp_divider_in.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = sum_exp_to_inv.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(sum_exp_to_inv.asUInt.asSInt)
val one_rec = in_to_float(127.S) // softmax maximum is 127 for signed int8
// Instantiate the hardloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 16))
exp_divider_in.ready := divider.io.inReady
divider.io.inValid := exp_divider_in.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
exp_divider_out.valid := divider.io.outValid_div
exp_divider_out.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(scale_t)
}
{
// Divider input
val stat = stats(sum_exp_to_inv_id)
exp_divider_in.valid := (stat.state === get_inv_sum_exp) && !lanes.io.busy
exp_divider_in.bits := sum_exp_to_inv.asUInt
}
{
// Divider output
val waiting_for_divide_id = MuxCase((num_stats-1).U,
stats.zipWithIndex.map { case (s,i) =>
(s.state === waiting_for_inv_sum_exp) -> i.U }
)
val stat = stats(waiting_for_divide_id)
exp_divider_out.ready := stat.state === waiting_for_inv_sum_exp
when (stat.state === waiting_for_inv_sum_exp) {
stat.inv_sum_exp := exp_divider_out.bits.asTypeOf(stat.inv_sum_exp)
}
}
val inv_sum_exp_to_scale_id = MuxCase((num_stats-1).U,
stats.zipWithIndex.map { case (s,i) =>
(s.state === get_scaled_inv_sum_exp) -> i.U }
)
val inv_sum_exp_scale_mul_pipe = Module(new MulPipe(scale_t))
{
// Scale input
val stat = stats(inv_sum_exp_to_scale_id)
val ins = inv_sum_exp_scale_mul_pipe.io.ins
ins.bits.x := stats(inv_sum_exp_to_scale_id).inv_sum_exp.asTypeOf(scale_t)
ins.bits.y := stats(inv_sum_exp_to_scale_id).req.acc_read_resp.scale
ins.valid := stat.state === get_scaled_inv_sum_exp
}
{
// Scale output
val waiting_for_scale_id = MuxCase((num_stats-1).U,
stats.zipWithIndex.map { case (s,i) =>
(s.state === waiting_for_scaled_inv_sum_exp) -> i.U }
)
val stat = stats(waiting_for_scale_id)
val out = inv_sum_exp_scale_mul_pipe.io.out
out.ready := stat.state === waiting_for_scaled_inv_sum_exp
when (stat.state === waiting_for_scaled_inv_sum_exp) {
stat.inv_sum_exp := out.bits.asTypeOf(stat.inv_sum_exp)
}
}
// State transitions
for (((stat, next_state), id) <- (stats zip next_states).zipWithIndex) {
val state = stat.state
val cmd = stat.cmd
val done = done_with_functional_units(id)
when (state === idle) {
// We have a different "when" statement below to support the case where a new row is input into the normalizer
next_state := idle
done := DontCare
}.elsewhen(state === output) {
next_state := Mux(io.out.fire && out_stats_id === id.U, idle, state)
done := io.out.fire && out_stats_id === id.U
}.elsewhen(state === get_max) {
val is_last_lane_input = stat.vec_groups_left === 0.U ||
(stat.vec_groups_left === 1.U &&
max_lanes.io.ins.bits.stats_id === id.U &&
max_lanes.io.ins.fire)
next_state := Mux(
is_last_lane_input,
MuxCase(state, Seq(
(cmd === NormCmd.MAX) -> idle,
(cmd === NormCmd.SUM_EXP || cmd === NormCmd.INV_SUM_EXP) -> get_sum
)),
state
)
done := is_last_lane_input && cmd === NormCmd.MAX
}.elsewhen(state === get_sum) {
val is_last_lane_input = stat.vec_groups_left === 0.U ||
(stat.vec_groups_left === 1.U &&
lanes.io.ins.bits.stats_id === id.U &&
lanes.io.ins.fire)
next_state := Mux(
is_last_lane_input,
MuxCase(state, Seq(
(cmd === NormCmd.SUM || cmd === NormCmd.VARIANCE || cmd === NormCmd.SUM_EXP) -> idle,
(cmd === NormCmd.MEAN) -> get_mean,
(cmd === NormCmd.INV_STDDEV) -> get_variance,
(cmd === NormCmd.INV_SUM_EXP) -> get_inv_sum_exp,
)),
state
)
// next_state := Mux(cmd === NormCmd.SUM || cmd === NormCmd.VARIANCE,
// Mux(is_last_lane_input, idle, state),
// Mux(is_last_lane_input,
// Mux(cmd === NormCmd.MEAN, get_mean, get_variance),
// state)
// )
done := is_last_lane_input && cmd =/= NormCmd.MEAN && cmd =/= NormCmd.INV_STDDEV && cmd =/= NormCmd.INV_SUM_EXP
}.elsewhen(state === get_mean || state === get_variance) {
next_state := Mux(divider_in.fire && sum_to_divide_id === id.U, state.next, state)
done := false.B
}.elsewhen(state === waiting_for_mean) {
next_state := Mux(divider_out.fire, idle, state)
done := divider_out.fire
}.elsewhen(state === waiting_for_variance) {
next_state := Mux(divider_out.fire, get_stddev, state)
done := false.B
}.elsewhen(state === get_stddev) {
next_state := Mux(sqrt_in.fire && variance_to_sqrt_id === id.U, state.next, state)
done := false.B
}.elsewhen(state === waiting_for_stddev) {
next_state := Mux(sqrt_out.fire, state.next, state)
done := false.B
}.elsewhen(state === get_inv_stddev) {
next_state := Mux(reciprocal_in.fire && stddev_to_inv_id === id.U, state.next, state)
done := false.B
}.elsewhen(state === waiting_for_inv_stddev) {
next_state := Mux(reciprocal_out.fire, state.next, state)
done := false.B
}.elsewhen(state === get_scaled_inv_stddev) {
next_state := Mux(inv_stddev_scale_mul_pipe.io.ins.fire && inv_stddev_to_scale_id === id.U, state.next, state)
done := false.B
}.elsewhen(state === waiting_for_scaled_inv_stddev) {
next_state := Mux(inv_stddev_scale_mul_pipe.io.out.fire, idle, state)
done := inv_stddev_scale_mul_pipe.io.out.fire
}.elsewhen(state === get_inv_sum_exp) {
next_state := Mux(exp_divider_in.fire && sum_exp_to_inv_id === id.U, state.next, state)
done := false.B
}.elsewhen(state === waiting_for_inv_sum_exp) {
next_state := Mux(exp_divider_out.fire, state.next, state)
done := false.B
}.elsewhen(state === get_scaled_inv_sum_exp) {
next_state := Mux(inv_sum_exp_scale_mul_pipe.io.ins.fire && inv_sum_exp_to_scale_id === id.U, state.next, state)
done := false.B
}.elsewhen(state === waiting_for_scaled_inv_sum_exp) {
next_state := Mux(inv_sum_exp_scale_mul_pipe.io.out.fire, idle, state)
done := inv_sum_exp_scale_mul_pipe.io.out.fire
}.otherwise {
assert(false.B, "invalid state in Normalizer")
next_state := DontCare
done := DontCare
}
when (io.in.fire && in_stats_id === id.U) {
next_state := Mux(io.in.bits.cmd === NormCmd.RESET, output,
Mux(io.in.bits.cmd === NormCmd.MAX, get_max, get_sum))
}
}
// Update stats variables
// For each per-request stats slot, update its running accumulators based on the
// state transition computed above (stats zip next_states pairs current state
// with the combinationally-chosen next state).
for (((stat, next_state), id) <- (stats zip next_states).zipWithIndex) {
val state = stat.state
// The running sum/count/max must be cleared when a pass finishes: either the
// slot is emitting its output, or it is leaving the get_mean / get_variance
// accumulation states.
val reset_running_state =
state === output ||
(state === get_mean && next_state =/= get_mean) ||
(state === get_variance && next_state =/= get_variance)
// An incoming beat targets this slot when its stats id matches.
val is_input = io.in.fire && in_stats_id === id.U
when (is_input) {
stat.req := io.in.bits
stat.count := stat.count + io.in.bits.len
stat.elems_left := io.in.bits.len
}
when(reset_running_state) {
stat.sum := acc_t.zero
// If an input arrives on the same cycle as the reset, seed the count with
// the new beat's length instead of zero.
stat.count := Mux(is_input, io.in.bits.len, 0.U)
stat.running_max := acc_t.minimum
}
// when (state =/= get_max && next_state === get_max) {
// stat.running_max := acc_t.minimum
// stat.max := acc_t.minimum
// }
}
// Assertions
// The shared divider/sqrt/reciprocal units are not pipelined across slots, so
// at most one slot may be waiting on each of them at any time.
assert(PopCount(stats.map(s => s.state === waiting_for_mean || s.state === waiting_for_variance)) <= 1.U, "we don't support pipelining the divider/sqrt-unit/inv-unit right now")
assert(PopCount(stats.map(_.state === waiting_for_stddev)) <= 1.U, "we don't support pipelining the divider/sqrt-unit/inv-unit right now")
assert(PopCount(stats.map(_.state === waiting_for_inv_stddev)) <= 1.U, "we don't support pipelining the divider/sqrt-unit/inv-unit right now")
// assert(PopCount(stats.map(_.state === output)) <= 1.U, "multiple outputs at same time")
// NOTE(review): the message says "see the width" — presumably "be the same width".
assert(acc_t.getWidth == scale_t.getWidth, "we use the same variable to hold both the variance and the inv-stddev, so we need them to see the width")
// Resets
// Synchronous reset of every stats slot back to its idle/zeroed condition.
when (reset.asBool) {
stats.foreach(_.state := idle)
stats.foreach(_.sum := acc_t.zero)
stats.foreach(_.max := acc_t.minimum)
stats.foreach(_.running_max := acc_t.minimum)
stats.foreach(_.count := 0.U)
stats.foreach(_.inv_sum_exp := acc_t.zero)
}
}
/** Factory for the [[Normalizer]] unit. Returns the (input, output) Decoupled
  * port pair of either a real normalizer or a simple pass-through queue,
  * depending on `is_passthru`.
  */
object Normalizer {
/** Build a normalizer front-end.
  *
  * @param is_passthru if true, instantiate only a 2-deep queue that forwards
  *                    the accumulator read response and leaves the statistics
  *                    outputs (mean/max/inv_stddev/inv_sum_exp) undriven
  * @return the (enqueue, dequeue) Decoupled interfaces of the chosen unit
  */
def apply[T <: Data, U <: Data](is_passthru: Boolean, max_len: Int, num_reduce_lanes: Int, num_stats: Int,
latency: Int, fullDataType: Vec[Vec[T]], scale_t: U)(implicit ev: Arithmetic[T]):
(DecoupledIO[NormalizedInput[T,U]], DecoupledIO[NormalizedOutput[T,U]]) = {
if (is_passthru) {
passthru(max_len = max_len, num_stats = num_stats, fullDataType = fullDataType, scale_t = scale_t)
} else {
gen(max_len = max_len, num_reduce_lanes = num_reduce_lanes, num_stats = num_stats, latency = latency,
fullDataType = fullDataType, scale_t = scale_t)
}
}
/** Instantiate a full [[Normalizer]] module and expose its in/out ports. */
def gen[T <: Data, U <: Data](max_len: Int, num_reduce_lanes: Int, num_stats: Int, latency: Int,
fullDataType: Vec[Vec[T]], scale_t: U)(implicit ev: Arithmetic[T]): (DecoupledIO[NormalizedInput[T,U]], DecoupledIO[NormalizedOutput[T,U]]) = {
val norm_unit_module = Module(new Normalizer(max_len, num_reduce_lanes, num_stats, latency, fullDataType, scale_t))
(norm_unit_module.io.in, norm_unit_module.io.out)
}
/** Pass-through variant: a 2-entry queue that forwards only acc_read_resp;
  * all statistics fields of the output are intentionally DontCare.
  */
def passthru[T <: Data, U <: Data](max_len: Int, num_stats: Int, fullDataType: Vec[Vec[T]], scale_t: U)
(implicit ev: Arithmetic[T]): (DecoupledIO[NormalizedInput[T,U]], DecoupledIO[NormalizedOutput[T,U]]) = {
val norm_unit_passthru_q = Module(new Queue(new NormalizedInput[T,U](max_len, num_stats, fullDataType, scale_t), 2))
val norm_unit_passthru_out = Wire(Decoupled(new NormalizedOutput(fullDataType, scale_t)))
norm_unit_passthru_out.valid := norm_unit_passthru_q.io.deq.valid
norm_unit_passthru_out.bits.acc_read_resp := norm_unit_passthru_q.io.deq.bits.acc_read_resp
norm_unit_passthru_out.bits.mean := DontCare
norm_unit_passthru_out.bits.max := DontCare
norm_unit_passthru_out.bits.inv_stddev := DontCare
norm_unit_passthru_out.bits.inv_sum_exp := DontCare
norm_unit_passthru_q.io.deq.ready := norm_unit_passthru_out.ready
(norm_unit_passthru_q.io.enq, norm_unit_passthru_out)
}
}
File Xbar.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressDecoder, AddressSet, RegionType, IdRange, TriStateValue}
import freechips.rocketchip.util.BundleField
// Trades off slave port proximity against routing resource cost
/** Runs `body` under a Parameters view whose [[ForceFanoutKey]] entry has the
  * requested per-channel (A..E) fanout overrides layered on top of whatever the
  * current configuration holds. Unset TriStateValues leave the prior setting.
  */
object ForceFanout
{
  def apply[T](
    a: TriStateValue = TriStateValue.unset,
    b: TriStateValue = TriStateValue.unset,
    c: TriStateValue = TriStateValue.unset,
    d: TriStateValue = TriStateValue.unset,
    e: TriStateValue = TriStateValue.unset)(body: Parameters => T)(implicit p: Parameters) =
  {
    val altered = p.alterPartial {
      case ForceFanoutKey =>
        // Overlay each tri-state override onto the currently-configured flags.
        val prev = p(ForceFanoutKey)
        ForceFanoutParams(
          a.update(prev.a),
          b.update(prev.b),
          c.update(prev.c),
          d.update(prev.d),
          e.update(prev.e))
    }
    body(altered)
  }
}
// Per-channel (A..E) flags that force the crossbar to fan the input out through
// an IdentityModule (see TLXbar.fanout), trading slave-port proximity for routing.
private case class ForceFanoutParams(a: Boolean, b: Boolean, c: Boolean, d: Boolean, e: Boolean)
// Config key for the flags above; the default forces fanout on no channel.
private case object ForceFanoutKey extends Field(ForceFanoutParams(false, false, false, false, false))
/** TileLink crossbar LazyModule. Merges the parameters of all connected client
  * and manager ports into one nexus node and, at module elaboration time,
  * delegates the actual wiring to [[TLXbar.circuit]].
  *
  * @param policy     arbitration policy used on every arbitrated channel
  * @param nameSuffix optional suffix appended to the generated module name
  */
class TLXbar(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule
{
val node = new TLNexusNode(
// Downstream-facing client parameters: union the bundle fields of all input
// ports and shift each client's sourceId into its assigned disjoint range.
clientFn = { seq =>
seq(0).v1copy(
echoFields = BundleField.union(seq.flatMap(_.echoFields)),
requestFields = BundleField.union(seq.flatMap(_.requestFields)),
responseKeys = seq.flatMap(_.responseKeys).distinct,
minLatency = seq.map(_.minLatency).min,
clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) =>
port.clients map { client => client.v1copy(
sourceId = client.sourceId.shift(range.start)
)}
}
)
},
// Upstream-facing manager parameters: union fields, take the max sink-id
// endpoint, and relabel per-port fifoIds into one global namespace.
managerFn = { seq =>
val fifoIdFactory = TLXbar.relabeler()
seq(0).v1copy(
responseFields = BundleField.union(seq.flatMap(_.responseFields)),
requestKeys = seq.flatMap(_.requestKeys).distinct,
minLatency = seq.map(_.minLatency).min,
endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max,
managers = seq.flatMap { port =>
// All ports of a single xbar must share one beat width.
require (port.beatBytes == seq(0).beatBytes,
s"Xbar ($name with parent $parent) data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B")
val fifoIdMapper = fifoIdFactory()
port.managers map { manager => manager.v1copy(
fifoId = manager.fifoId.map(fifoIdMapper(_))
)}
}
)
}
){
// A 1-in/1-out xbar is just a wire and may be optimized away.
override def circuitIdentity = outputs.size == 1 && inputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
// Warn (at elaboration time) about quadratic blowup in very large crossbars.
if ((node.in.size * node.out.size) > (8*32)) {
println (s"!!! WARNING !!!")
println (s"  Your TLXbar ($name with parent $parent) is very large, with ${node.in.size} Masters and ${node.out.size} Slaves.")
println (s"!!! WARNING !!!")
}
val wide_bundle = TLBundleParameters.union((node.in ++ node.out).map(_._2.bundle))
override def desiredName = (Seq("TLXbar") ++ nameSuffix ++ Seq(s"i${node.in.size}_o${node.out.size}_${wide_bundle.shortName}")).mkString("_")
// All of the actual crossbar wiring lives in the companion object.
TLXbar.circuit(policy, node.in, node.out)
}
}
/** Companion utilities for [[TLXbar]]: disjoint source/sink ID assignment,
  * FIFO-domain relabeling, and the stateless crossbar wiring routine shared by
  * every instantiated crossbar.
  */
object TLXbar
{
  /** Assign each input (master) port a disjoint, power-of-two-aligned source-ID range. */
  def mapInputIds(ports: Seq[TLMasterPortParameters]) = assignRanges(ports.map(_.endSourceId))

  /** Assign each output (slave) port a disjoint, power-of-two-aligned sink-ID range. */
  def mapOutputIds(ports: Seq[TLSlavePortParameters]) = assignRanges(ports.map(_.endSinkId))

  /** Pack the requested ID-space sizes into disjoint ranges.
    *
    * Each size is rounded up to a power of two, the ranges are laid out largest
    * at the lowest offsets (so every range is naturally aligned), and the result
    * is returned in the caller's original port order. A size of zero yields the
    * empty range.
    */
  def assignRanges(sizes: Seq[Int]) = {
    val pow2Sizes = sizes.map { z => if (z == 0) 0 else 1 << log2Ceil(z) }
    val tuples = pow2Sizes.zipWithIndex.sortBy(_._1) // record old index, then sort by increasing size
    val starts = tuples.scanRight(0)(_._1 + _).tail // suffix-sum of the sizes = the start positions
    val ranges = (tuples zip starts) map { case ((sz, i), st) =>
      (if (sz == 0) IdRange(0, 0) else IdRange(st, st + sz), i)
    }
    ranges.sortBy(_._2).map(_._1) // Restore original order
  }

  /** Returns a factory of fifoId remappers. Every remapper produced by one
    * factory draws fresh ids from the same shared counter, so fifo domains of
    * different ports never collide; ids within one port are remapped stably.
    */
  def relabeler() = {
    var idFactory = 0
    () => {
      val fifoMap = scala.collection.mutable.HashMap.empty[Int, Int]
      (x: Int) => {
        if (fifoMap.contains(x)) fifoMap(x) else {
          val out = idFactory
          idFactory = idFactory + 1
          fifoMap += (x -> out)
          out
        }
      }
    }
  }

  /** Wire a full five-channel TileLink crossbar between the given input and
    * output edges: widen every port to a common bundle, route A/C by address
    * and B/D by source-ID range (E by sink-ID range), fan each input out to
    * the reachable outputs, and arbitrate each output with `policy`.
    *
    * NOTE: previously declared with Scala procedure syntax (`def circuit(...) { }`),
    * which is deprecated; an explicit `: Unit =` is now used.
    */
  def circuit(policy: TLArbiter.Policy, seqIn: Seq[(TLBundle, TLEdge)], seqOut: Seq[(TLBundle, TLEdge)]): Unit = {
    val (io_in, edgesIn) = seqIn.unzip
    val (io_out, edgesOut) = seqOut.unzip

    // Not every master need connect to every slave on every channel; determine which connections are necessary
    val reachableIO = edgesIn.map { cp => edgesOut.map { mp =>
      cp.client.clients.exists { c => mp.manager.managers.exists { m =>
        c.visibility.exists { ca => m.address.exists { ma =>
          ca.overlaps(ma)}}}}
      }.toVector}.toVector
    // B channel is needed only where a probing client meets a tracked region.
    val probeIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
      (edgesOut zip reachableO).map { case (mp, reachable) =>
        reachable && cp.client.anySupportProbe && mp.manager.managers.exists(_.regionType >= RegionType.TRACKED)
      }.toVector}.toVector
    // C/E channels are needed only where AcquireB is possible.
    val releaseIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
      (edgesOut zip reachableO).map { case (mp, reachable) =>
        reachable && cp.client.anySupportProbe && mp.manager.anySupportAcquireB
      }.toVector}.toVector

    val connectAIO = reachableIO
    val connectBIO = probeIO
    val connectCIO = releaseIO
    val connectDIO = reachableIO
    val connectEIO = releaseIO

    def transpose[T](x: Seq[Seq[T]]) = if (x.isEmpty) Nil else Vector.tabulate(x(0).size) { i => Vector.tabulate(x.size) { j => x(j)(i) } }
    val connectAOI = transpose(connectAIO)
    val connectBOI = transpose(connectBIO)
    val connectCOI = transpose(connectCIO)
    val connectDOI = transpose(connectDIO)
    val connectEOI = transpose(connectEIO)

    // Grab the port ID mapping
    val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
    val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager))

    // We need an intermediate size of bundle with the widest possible identifiers
    val wide_bundle = TLBundleParameters.union(io_in.map(_.params) ++ io_out.map(_.params))

    // Handle size = 1 gracefully (Chisel3 empty range is broken)
    def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)

    // Transform input bundle sources (sinks use global namespace on both sides)
    val in = Wire(Vec(io_in.size, TLBundle(wide_bundle)))
    for (i <- 0 until in.size) {
      val r = inputIdRanges(i)

      if (connectAIO(i).exists(x=>x)) {
        in(i).a.bits.user := DontCare
        in(i).a.squeezeAll.waiveAll :<>= io_in(i).a.squeezeAll.waiveAll
        // Move this input's sources into its globally-unique ID range.
        in(i).a.bits.source := io_in(i).a.bits.source | r.start.U
      } else {
        in(i).a := DontCare
        io_in(i).a := DontCare
        in(i).a.valid := false.B
        io_in(i).a.ready := true.B
      }

      if (connectBIO(i).exists(x=>x)) {
        io_in(i).b.squeezeAll :<>= in(i).b.squeezeAll
        io_in(i).b.bits.source := trim(in(i).b.bits.source, r.size)
      } else {
        in(i).b := DontCare
        io_in(i).b := DontCare
        in(i).b.ready := true.B
        io_in(i).b.valid := false.B
      }

      if (connectCIO(i).exists(x=>x)) {
        in(i).c.bits.user := DontCare
        in(i).c.squeezeAll.waiveAll :<>= io_in(i).c.squeezeAll.waiveAll
        in(i).c.bits.source := io_in(i).c.bits.source | r.start.U
      } else {
        in(i).c := DontCare
        io_in(i).c := DontCare
        in(i).c.valid := false.B
        io_in(i).c.ready := true.B
      }

      if (connectDIO(i).exists(x=>x)) {
        io_in(i).d.squeezeAll.waiveAll :<>= in(i).d.squeezeAll.waiveAll
        io_in(i).d.bits.source := trim(in(i).d.bits.source, r.size)
      } else {
        in(i).d := DontCare
        io_in(i).d := DontCare
        in(i).d.ready := true.B
        io_in(i).d.valid := false.B
      }

      if (connectEIO(i).exists(x=>x)) {
        in(i).e.squeezeAll :<>= io_in(i).e.squeezeAll
      } else {
        in(i).e := DontCare
        io_in(i).e := DontCare
        in(i).e.valid := false.B
        io_in(i).e.ready := true.B
      }
    }

    // Transform output bundle sinks (sources use global namespace on both sides)
    val out = Wire(Vec(io_out.size, TLBundle(wide_bundle)))
    for (o <- 0 until out.size) {
      val r = outputIdRanges(o)

      if (connectAOI(o).exists(x=>x)) {
        out(o).a.bits.user := DontCare
        io_out(o).a.squeezeAll.waiveAll :<>= out(o).a.squeezeAll.waiveAll
      } else {
        out(o).a := DontCare
        io_out(o).a := DontCare
        out(o).a.ready := true.B
        io_out(o).a.valid := false.B
      }

      if (connectBOI(o).exists(x=>x)) {
        out(o).b.squeezeAll :<>= io_out(o).b.squeezeAll
      } else {
        out(o).b := DontCare
        io_out(o).b := DontCare
        out(o).b.valid := false.B
        io_out(o).b.ready := true.B
      }

      if (connectCOI(o).exists(x=>x)) {
        out(o).c.bits.user := DontCare
        io_out(o).c.squeezeAll.waiveAll :<>= out(o).c.squeezeAll.waiveAll
      } else {
        out(o).c := DontCare
        io_out(o).c := DontCare
        out(o).c.ready := true.B
        io_out(o).c.valid := false.B
      }

      if (connectDOI(o).exists(x=>x)) {
        out(o).d.squeezeAll :<>= io_out(o).d.squeezeAll
        // Move this output's sinks into its globally-unique ID range.
        out(o).d.bits.sink := io_out(o).d.bits.sink | r.start.U
      } else {
        out(o).d := DontCare
        io_out(o).d := DontCare
        out(o).d.valid := false.B
        io_out(o).d.ready := true.B
      }

      if (connectEOI(o).exists(x=>x)) {
        io_out(o).e.squeezeAll :<>= out(o).e.squeezeAll
        io_out(o).e.bits.sink := trim(out(o).e.bits.sink, r.size)
      } else {
        out(o).e := DontCare
        io_out(o).e := DontCare
        out(o).e.ready := true.B
        io_out(o).e.valid := false.B
      }
    }

    // Filter a list to only those elements selected
    def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1)

    // Based on input=>output connectivity, create per-input minimal address decode circuits
    val requiredAC = (connectAIO ++ connectCIO).distinct
    val outputPortFns: Map[Vector[Boolean], Seq[UInt => Bool]] = requiredAC.map { connectO =>
      val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address))
      val routingMask = AddressDecoder(filter(port_addrs, connectO))
      val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct))

      // Print the address mapping
      if (false) {
        println("Xbar mapping:")
        route_addrs.foreach { p =>
          print(" ")
          p.foreach { a => print(s" ${a}") }
          println("")
        }
        println("--")
      }

      (connectO, route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_ || _)))
    }.toMap

    // Print the ID mapping
    if (false) {
      println(s"XBar mapping:")
      (edgesIn zip inputIdRanges).zipWithIndex.foreach { case ((edge, id), i) =>
        println(s"\t$i assigned ${id} for ${edge.client.clients.map(_.name).mkString(", ")}")
      }
      println("")
    }

    val addressA = (in zip edgesIn) map { case (i, e) => e.address(i.a.bits) }
    val addressC = (in zip edgesIn) map { case (i, e) => e.address(i.c.bits) }

    // With a single reachable output no decode is needed; route unconditionally.
    def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
    val requestAIO = (connectAIO zip addressA) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
    val requestCIO = (connectCIO zip addressC) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
    val requestBOI = out.map { o => inputIdRanges.map { i => i.contains(o.b.bits.source) } }
    val requestDOI = out.map { o => inputIdRanges.map { i => i.contains(o.d.bits.source) } }
    val requestEIO = in.map { i => outputIdRanges.map { o => o.contains(i.e.bits.sink) } }

    val beatsAI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.a.bits) }
    val beatsBO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.b.bits) }
    val beatsCI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.c.bits) }
    val beatsDO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.d.bits) }
    val beatsEI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.e.bits) }

    // Fanout the input sources to the output sinks
    val portsAOI = transpose((in zip requestAIO) map { case (i, r) => TLXbar.fanout(i.a, r, edgesOut.map(_.params(ForceFanoutKey).a)) })
    val portsBIO = transpose((out zip requestBOI) map { case (o, r) => TLXbar.fanout(o.b, r, edgesIn .map(_.params(ForceFanoutKey).b)) })
    val portsCOI = transpose((in zip requestCIO) map { case (i, r) => TLXbar.fanout(i.c, r, edgesOut.map(_.params(ForceFanoutKey).c)) })
    val portsDIO = transpose((out zip requestDOI) map { case (o, r) => TLXbar.fanout(o.d, r, edgesIn .map(_.params(ForceFanoutKey).d)) })
    val portsEOI = transpose((in zip requestEIO) map { case (i, r) => TLXbar.fanout(i.e, r, edgesOut.map(_.params(ForceFanoutKey).e)) })

    // Arbitrate amongst the sources
    for (o <- 0 until out.size) {
      TLArbiter(policy)(out(o).a, filter(beatsAI zip portsAOI(o), connectAOI(o)):_*)
      TLArbiter(policy)(out(o).c, filter(beatsCI zip portsCOI(o), connectCOI(o)):_*)
      TLArbiter(policy)(out(o).e, filter(beatsEI zip portsEOI(o), connectEOI(o)):_*)
      // Unconnected fanout legs must never backpressure-deadlock; pin them.
      filter(portsAOI(o), connectAOI(o).map(!_)) foreach { r => r.ready := false.B }
      filter(portsCOI(o), connectCOI(o).map(!_)) foreach { r => r.ready := false.B }
      filter(portsEOI(o), connectEOI(o).map(!_)) foreach { r => r.ready := false.B }
    }

    for (i <- 0 until in.size) {
      TLArbiter(policy)(in(i).b, filter(beatsBO zip portsBIO(i), connectBIO(i)):_*)
      TLArbiter(policy)(in(i).d, filter(beatsDO zip portsDIO(i), connectDIO(i)):_*)
      filter(portsBIO(i), connectBIO(i).map(!_)) foreach { r => r.ready := false.B }
      filter(portsDIO(i), connectDIO(i).map(!_)) foreach { r => r.ready := false.B }
    }
  }

  /** Construct a crossbar LazyModule and return its nexus node. */
  def apply(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
  {
    val xbar = LazyModule(new TLXbar(policy, nameSuffix))
    xbar.node
  }

  // Replicate an input port to each output port
  /** Fan `input` out to one Decoupled port per select line. A leg is valid only
    * when its select is asserted (or there is a single leg); ready is gathered
    * back with a one-hot mux. `force` inserts an IdentityModule on the bits to
    * keep the fanout leg physically separate (see [[ForceFanout]]).
    */
  def fanout[T <: TLChannel](input: DecoupledIO[T], select: Seq[Bool], force: Seq[Boolean] = Nil): Seq[DecoupledIO[T]] = {
    val filtered = Wire(Vec(select.size, chiselTypeOf(input)))
    for (i <- 0 until select.size) {
      filtered(i).bits := (if (force.lift(i).getOrElse(false)) IdentityModule(input.bits) else input.bits)
      filtered(i).valid := input.valid && (select(i) || (select.size == 1).B)
    }
    input.ready := Mux1H(select, filtered.map(_.ready))
    filtered
  }
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
/** Synthesizable unit test: one fuzzer driving an xbar that fans out to
  * `nManagers` independent 1KiB RAMs through fragmenters and random delays.
  */
class TLRAMXbar(nManagers: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("Xbar"))
val xbar = LazyModule(new TLXbar)
xbar.node := TLDelayer(0.1) := model.node := fuzz.node
// One 0x400-byte RAM per manager, at consecutive 0x400-aligned addresses.
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
// Test completes when the fuzzer has issued all of its transactions.
io.finished := fuzz.module.io.finished
}
}
/** UnitTest wrapper that elaborates [[TLRAMXbar]] and forwards start/finished. */
class TLRAMXbarTest(nManagers: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMXbar(nManagers,txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
/** Synthesizable unit test: `nClients` fuzzers contending through one xbar for
  * `nManagers` RAMs, exercising arbitration as well as routing.
  */
class TLMulticlientXbar(nManagers: Int, nClients: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val xbar = LazyModule(new TLXbar)
val fuzzers = (0 until nClients) map { n =>
val fuzz = LazyModule(new TLFuzzer(txns))
xbar.node := TLDelayer(0.1) := fuzz.node
fuzz
}
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
// NOTE(review): completion is keyed to the last fuzzer only — presumably all
// fuzzers issue the same txn count; confirm if txns ever differs per client.
io.finished := fuzzers.last.module.io.finished
}
}
/** UnitTest wrapper that elaborates [[TLMulticlientXbar]] and forwards start/finished. */
class TLMulticlientXbarTest(nManagers: Int, nClients: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLMulticlientXbar(nManagers, nClients, txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
| module Scratchpad( // @[Scratchpad.scala:205:9]
input clock, // @[Scratchpad.scala:205:9]
input reset, // @[Scratchpad.scala:205:9]
input auto_id_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_id_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_id_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_id_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_id_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_id_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_id_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [15:0] auto_id_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [127:0] auto_id_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_id_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_id_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_id_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_id_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_id_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_id_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_id_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_id_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_id_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [127:0] auto_id_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_id_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output io_dma_read_req_ready, // @[Scratchpad.scala:206:16]
input io_dma_read_req_valid, // @[Scratchpad.scala:206:16]
input [39:0] io_dma_read_req_bits_vaddr, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_laddr_is_acc_addr, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_laddr_accumulate, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_laddr_read_full_acc_row, // @[Scratchpad.scala:206:16]
input [2:0] io_dma_read_req_bits_laddr_norm_cmd, // @[Scratchpad.scala:206:16]
input [10:0] io_dma_read_req_bits_laddr_garbage, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_laddr_garbage_bit, // @[Scratchpad.scala:206:16]
input [13:0] io_dma_read_req_bits_laddr_data, // @[Scratchpad.scala:206:16]
input [15:0] io_dma_read_req_bits_cols, // @[Scratchpad.scala:206:16]
input [15:0] io_dma_read_req_bits_repeats, // @[Scratchpad.scala:206:16]
input [31:0] io_dma_read_req_bits_scale, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_has_acc_bitwidth, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_all_zeros, // @[Scratchpad.scala:206:16]
input [15:0] io_dma_read_req_bits_block_stride, // @[Scratchpad.scala:206:16]
input [7:0] io_dma_read_req_bits_pixel_repeats, // @[Scratchpad.scala:206:16]
input [7:0] io_dma_read_req_bits_cmd_id, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_debug, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_cease, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_wfi, // @[Scratchpad.scala:206:16]
input [31:0] io_dma_read_req_bits_status_isa, // @[Scratchpad.scala:206:16]
input [1:0] io_dma_read_req_bits_status_dprv, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_dv, // @[Scratchpad.scala:206:16]
input [1:0] io_dma_read_req_bits_status_prv, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_v, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_sd, // @[Scratchpad.scala:206:16]
input [22:0] io_dma_read_req_bits_status_zero2, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_mpv, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_gva, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_mbe, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_sbe, // @[Scratchpad.scala:206:16]
input [1:0] io_dma_read_req_bits_status_sxl, // @[Scratchpad.scala:206:16]
input [1:0] io_dma_read_req_bits_status_uxl, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_sd_rv32, // @[Scratchpad.scala:206:16]
input [7:0] io_dma_read_req_bits_status_zero1, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_tsr, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_tw, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_tvm, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_mxr, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_sum, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_mprv, // @[Scratchpad.scala:206:16]
input [1:0] io_dma_read_req_bits_status_xs, // @[Scratchpad.scala:206:16]
input [1:0] io_dma_read_req_bits_status_fs, // @[Scratchpad.scala:206:16]
input [1:0] io_dma_read_req_bits_status_mpp, // @[Scratchpad.scala:206:16]
input [1:0] io_dma_read_req_bits_status_vs, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_spp, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_mpie, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_ube, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_spie, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_upie, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_mie, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_hie, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_sie, // @[Scratchpad.scala:206:16]
input io_dma_read_req_bits_status_uie, // @[Scratchpad.scala:206:16]
output io_dma_read_resp_valid, // @[Scratchpad.scala:206:16]
output [15:0] io_dma_read_resp_bits_bytesRead, // @[Scratchpad.scala:206:16]
output [7:0] io_dma_read_resp_bits_cmd_id, // @[Scratchpad.scala:206:16]
output io_dma_write_req_ready, // @[Scratchpad.scala:206:16]
input io_dma_write_req_valid, // @[Scratchpad.scala:206:16]
input [39:0] io_dma_write_req_bits_vaddr, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_laddr_is_acc_addr, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_laddr_accumulate, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_laddr_read_full_acc_row, // @[Scratchpad.scala:206:16]
input [10:0] io_dma_write_req_bits_laddr_garbage, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_laddr_garbage_bit, // @[Scratchpad.scala:206:16]
input [13:0] io_dma_write_req_bits_laddr_data, // @[Scratchpad.scala:206:16]
input [2:0] io_dma_write_req_bits_acc_act, // @[Scratchpad.scala:206:16]
input [31:0] io_dma_write_req_bits_acc_scale, // @[Scratchpad.scala:206:16]
input [15:0] io_dma_write_req_bits_len, // @[Scratchpad.scala:206:16]
input [7:0] io_dma_write_req_bits_block, // @[Scratchpad.scala:206:16]
input [7:0] io_dma_write_req_bits_cmd_id, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_debug, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_cease, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_wfi, // @[Scratchpad.scala:206:16]
input [31:0] io_dma_write_req_bits_status_isa, // @[Scratchpad.scala:206:16]
input [1:0] io_dma_write_req_bits_status_dprv, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_dv, // @[Scratchpad.scala:206:16]
input [1:0] io_dma_write_req_bits_status_prv, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_v, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_sd, // @[Scratchpad.scala:206:16]
input [22:0] io_dma_write_req_bits_status_zero2, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_mpv, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_gva, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_mbe, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_sbe, // @[Scratchpad.scala:206:16]
input [1:0] io_dma_write_req_bits_status_sxl, // @[Scratchpad.scala:206:16]
input [1:0] io_dma_write_req_bits_status_uxl, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_sd_rv32, // @[Scratchpad.scala:206:16]
input [7:0] io_dma_write_req_bits_status_zero1, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_tsr, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_tw, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_tvm, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_mxr, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_sum, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_mprv, // @[Scratchpad.scala:206:16]
input [1:0] io_dma_write_req_bits_status_xs, // @[Scratchpad.scala:206:16]
input [1:0] io_dma_write_req_bits_status_fs, // @[Scratchpad.scala:206:16]
input [1:0] io_dma_write_req_bits_status_mpp, // @[Scratchpad.scala:206:16]
input [1:0] io_dma_write_req_bits_status_vs, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_spp, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_mpie, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_ube, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_spie, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_upie, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_mie, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_hie, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_sie, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_status_uie, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_pool_en, // @[Scratchpad.scala:206:16]
input io_dma_write_req_bits_store_en, // @[Scratchpad.scala:206:16]
output io_dma_write_resp_valid, // @[Scratchpad.scala:206:16]
output [7:0] io_dma_write_resp_bits_cmd_id, // @[Scratchpad.scala:206:16]
output io_srams_read_0_req_ready, // @[Scratchpad.scala:206:16]
input io_srams_read_0_req_valid, // @[Scratchpad.scala:206:16]
input [11:0] io_srams_read_0_req_bits_addr, // @[Scratchpad.scala:206:16]
input io_srams_read_0_resp_ready, // @[Scratchpad.scala:206:16]
output io_srams_read_0_resp_valid, // @[Scratchpad.scala:206:16]
output [127:0] io_srams_read_0_resp_bits_data, // @[Scratchpad.scala:206:16]
output io_srams_read_0_resp_bits_fromDMA, // @[Scratchpad.scala:206:16]
output io_srams_read_1_req_ready, // @[Scratchpad.scala:206:16]
input io_srams_read_1_req_valid, // @[Scratchpad.scala:206:16]
input [11:0] io_srams_read_1_req_bits_addr, // @[Scratchpad.scala:206:16]
input io_srams_read_1_resp_ready, // @[Scratchpad.scala:206:16]
output io_srams_read_1_resp_valid, // @[Scratchpad.scala:206:16]
output [127:0] io_srams_read_1_resp_bits_data, // @[Scratchpad.scala:206:16]
output io_srams_read_1_resp_bits_fromDMA, // @[Scratchpad.scala:206:16]
output io_srams_read_2_req_ready, // @[Scratchpad.scala:206:16]
input io_srams_read_2_req_valid, // @[Scratchpad.scala:206:16]
input [11:0] io_srams_read_2_req_bits_addr, // @[Scratchpad.scala:206:16]
input io_srams_read_2_resp_ready, // @[Scratchpad.scala:206:16]
output io_srams_read_2_resp_valid, // @[Scratchpad.scala:206:16]
output [127:0] io_srams_read_2_resp_bits_data, // @[Scratchpad.scala:206:16]
output io_srams_read_2_resp_bits_fromDMA, // @[Scratchpad.scala:206:16]
output io_srams_read_3_req_ready, // @[Scratchpad.scala:206:16]
input io_srams_read_3_req_valid, // @[Scratchpad.scala:206:16]
input [11:0] io_srams_read_3_req_bits_addr, // @[Scratchpad.scala:206:16]
input io_srams_read_3_resp_ready, // @[Scratchpad.scala:206:16]
output io_srams_read_3_resp_valid, // @[Scratchpad.scala:206:16]
output [127:0] io_srams_read_3_resp_bits_data, // @[Scratchpad.scala:206:16]
output io_srams_read_3_resp_bits_fromDMA, // @[Scratchpad.scala:206:16]
input io_srams_write_0_en, // @[Scratchpad.scala:206:16]
input [11:0] io_srams_write_0_addr, // @[Scratchpad.scala:206:16]
input io_srams_write_0_mask_0, // @[Scratchpad.scala:206:16]
input io_srams_write_0_mask_1, // @[Scratchpad.scala:206:16]
input io_srams_write_0_mask_2, // @[Scratchpad.scala:206:16]
input io_srams_write_0_mask_3, // @[Scratchpad.scala:206:16]
input io_srams_write_0_mask_4, // @[Scratchpad.scala:206:16]
input io_srams_write_0_mask_5, // @[Scratchpad.scala:206:16]
input io_srams_write_0_mask_6, // @[Scratchpad.scala:206:16]
input io_srams_write_0_mask_7, // @[Scratchpad.scala:206:16]
input io_srams_write_0_mask_8, // @[Scratchpad.scala:206:16]
input io_srams_write_0_mask_9, // @[Scratchpad.scala:206:16]
input io_srams_write_0_mask_10, // @[Scratchpad.scala:206:16]
input io_srams_write_0_mask_11, // @[Scratchpad.scala:206:16]
input io_srams_write_0_mask_12, // @[Scratchpad.scala:206:16]
input io_srams_write_0_mask_13, // @[Scratchpad.scala:206:16]
input io_srams_write_0_mask_14, // @[Scratchpad.scala:206:16]
input io_srams_write_0_mask_15, // @[Scratchpad.scala:206:16]
input [127:0] io_srams_write_0_data, // @[Scratchpad.scala:206:16]
input io_srams_write_1_en, // @[Scratchpad.scala:206:16]
input [11:0] io_srams_write_1_addr, // @[Scratchpad.scala:206:16]
input io_srams_write_1_mask_0, // @[Scratchpad.scala:206:16]
input io_srams_write_1_mask_1, // @[Scratchpad.scala:206:16]
input io_srams_write_1_mask_2, // @[Scratchpad.scala:206:16]
input io_srams_write_1_mask_3, // @[Scratchpad.scala:206:16]
input io_srams_write_1_mask_4, // @[Scratchpad.scala:206:16]
input io_srams_write_1_mask_5, // @[Scratchpad.scala:206:16]
input io_srams_write_1_mask_6, // @[Scratchpad.scala:206:16]
input io_srams_write_1_mask_7, // @[Scratchpad.scala:206:16]
input io_srams_write_1_mask_8, // @[Scratchpad.scala:206:16]
input io_srams_write_1_mask_9, // @[Scratchpad.scala:206:16]
input io_srams_write_1_mask_10, // @[Scratchpad.scala:206:16]
input io_srams_write_1_mask_11, // @[Scratchpad.scala:206:16]
input io_srams_write_1_mask_12, // @[Scratchpad.scala:206:16]
input io_srams_write_1_mask_13, // @[Scratchpad.scala:206:16]
input io_srams_write_1_mask_14, // @[Scratchpad.scala:206:16]
input io_srams_write_1_mask_15, // @[Scratchpad.scala:206:16]
input [127:0] io_srams_write_1_data, // @[Scratchpad.scala:206:16]
input io_srams_write_2_en, // @[Scratchpad.scala:206:16]
input [11:0] io_srams_write_2_addr, // @[Scratchpad.scala:206:16]
input io_srams_write_2_mask_0, // @[Scratchpad.scala:206:16]
input io_srams_write_2_mask_1, // @[Scratchpad.scala:206:16]
input io_srams_write_2_mask_2, // @[Scratchpad.scala:206:16]
input io_srams_write_2_mask_3, // @[Scratchpad.scala:206:16]
input io_srams_write_2_mask_4, // @[Scratchpad.scala:206:16]
input io_srams_write_2_mask_5, // @[Scratchpad.scala:206:16]
input io_srams_write_2_mask_6, // @[Scratchpad.scala:206:16]
input io_srams_write_2_mask_7, // @[Scratchpad.scala:206:16]
input io_srams_write_2_mask_8, // @[Scratchpad.scala:206:16]
input io_srams_write_2_mask_9, // @[Scratchpad.scala:206:16]
input io_srams_write_2_mask_10, // @[Scratchpad.scala:206:16]
input io_srams_write_2_mask_11, // @[Scratchpad.scala:206:16]
input io_srams_write_2_mask_12, // @[Scratchpad.scala:206:16]
input io_srams_write_2_mask_13, // @[Scratchpad.scala:206:16]
input io_srams_write_2_mask_14, // @[Scratchpad.scala:206:16]
input io_srams_write_2_mask_15, // @[Scratchpad.scala:206:16]
input [127:0] io_srams_write_2_data, // @[Scratchpad.scala:206:16]
input io_srams_write_3_en, // @[Scratchpad.scala:206:16]
input [11:0] io_srams_write_3_addr, // @[Scratchpad.scala:206:16]
input io_srams_write_3_mask_0, // @[Scratchpad.scala:206:16]
input io_srams_write_3_mask_1, // @[Scratchpad.scala:206:16]
input io_srams_write_3_mask_2, // @[Scratchpad.scala:206:16]
input io_srams_write_3_mask_3, // @[Scratchpad.scala:206:16]
input io_srams_write_3_mask_4, // @[Scratchpad.scala:206:16]
input io_srams_write_3_mask_5, // @[Scratchpad.scala:206:16]
input io_srams_write_3_mask_6, // @[Scratchpad.scala:206:16]
input io_srams_write_3_mask_7, // @[Scratchpad.scala:206:16]
input io_srams_write_3_mask_8, // @[Scratchpad.scala:206:16]
input io_srams_write_3_mask_9, // @[Scratchpad.scala:206:16]
input io_srams_write_3_mask_10, // @[Scratchpad.scala:206:16]
input io_srams_write_3_mask_11, // @[Scratchpad.scala:206:16]
input io_srams_write_3_mask_12, // @[Scratchpad.scala:206:16]
input io_srams_write_3_mask_13, // @[Scratchpad.scala:206:16]
input io_srams_write_3_mask_14, // @[Scratchpad.scala:206:16]
input io_srams_write_3_mask_15, // @[Scratchpad.scala:206:16]
input [127:0] io_srams_write_3_data, // @[Scratchpad.scala:206:16]
output io_acc_read_req_0_ready, // @[Scratchpad.scala:206:16]
input io_acc_read_req_0_valid, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_read_req_0_bits_scale_bits, // @[Scratchpad.scala:206:16]
input [8:0] io_acc_read_req_0_bits_addr, // @[Scratchpad.scala:206:16]
input [2:0] io_acc_read_req_0_bits_act, // @[Scratchpad.scala:206:16]
output io_acc_read_req_1_ready, // @[Scratchpad.scala:206:16]
input io_acc_read_req_1_valid, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_read_req_1_bits_scale_bits, // @[Scratchpad.scala:206:16]
input [8:0] io_acc_read_req_1_bits_addr, // @[Scratchpad.scala:206:16]
input [2:0] io_acc_read_req_1_bits_act, // @[Scratchpad.scala:206:16]
input io_acc_read_resp_0_ready, // @[Scratchpad.scala:206:16]
output io_acc_read_resp_0_valid, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_0_bits_full_data_0_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_0_bits_full_data_1_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_0_bits_full_data_2_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_0_bits_full_data_3_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_0_bits_full_data_4_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_0_bits_full_data_5_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_0_bits_full_data_6_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_0_bits_full_data_7_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_0_bits_full_data_8_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_0_bits_full_data_9_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_0_bits_full_data_10_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_0_bits_full_data_11_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_0_bits_full_data_12_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_0_bits_full_data_13_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_0_bits_full_data_14_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_0_bits_full_data_15_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_0_bits_data_0_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_0_bits_data_1_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_0_bits_data_2_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_0_bits_data_3_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_0_bits_data_4_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_0_bits_data_5_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_0_bits_data_6_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_0_bits_data_7_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_0_bits_data_8_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_0_bits_data_9_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_0_bits_data_10_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_0_bits_data_11_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_0_bits_data_12_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_0_bits_data_13_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_0_bits_data_14_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_0_bits_data_15_0, // @[Scratchpad.scala:206:16]
output [1:0] io_acc_read_resp_0_bits_acc_bank_id, // @[Scratchpad.scala:206:16]
output io_acc_read_resp_0_bits_fromDMA, // @[Scratchpad.scala:206:16]
input io_acc_read_resp_1_ready, // @[Scratchpad.scala:206:16]
output io_acc_read_resp_1_valid, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_1_bits_full_data_0_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_1_bits_full_data_1_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_1_bits_full_data_2_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_1_bits_full_data_3_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_1_bits_full_data_4_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_1_bits_full_data_5_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_1_bits_full_data_6_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_1_bits_full_data_7_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_1_bits_full_data_8_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_1_bits_full_data_9_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_1_bits_full_data_10_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_1_bits_full_data_11_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_1_bits_full_data_12_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_1_bits_full_data_13_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_1_bits_full_data_14_0, // @[Scratchpad.scala:206:16]
output [7:0] io_acc_read_resp_1_bits_full_data_15_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_1_bits_data_0_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_1_bits_data_1_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_1_bits_data_2_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_1_bits_data_3_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_1_bits_data_4_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_1_bits_data_5_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_1_bits_data_6_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_1_bits_data_7_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_1_bits_data_8_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_1_bits_data_9_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_1_bits_data_10_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_1_bits_data_11_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_1_bits_data_12_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_1_bits_data_13_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_1_bits_data_14_0, // @[Scratchpad.scala:206:16]
output [31:0] io_acc_read_resp_1_bits_data_15_0, // @[Scratchpad.scala:206:16]
output [1:0] io_acc_read_resp_1_bits_acc_bank_id, // @[Scratchpad.scala:206:16]
output io_acc_read_resp_1_bits_fromDMA, // @[Scratchpad.scala:206:16]
input io_acc_write_0_valid, // @[Scratchpad.scala:206:16]
input [8:0] io_acc_write_0_bits_addr, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_0_bits_data_0_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_0_bits_data_1_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_0_bits_data_2_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_0_bits_data_3_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_0_bits_data_4_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_0_bits_data_5_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_0_bits_data_6_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_0_bits_data_7_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_0_bits_data_8_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_0_bits_data_9_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_0_bits_data_10_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_0_bits_data_11_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_0_bits_data_12_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_0_bits_data_13_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_0_bits_data_14_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_0_bits_data_15_0, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_acc, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_0, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_1, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_2, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_3, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_4, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_5, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_6, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_7, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_8, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_9, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_10, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_11, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_12, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_13, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_14, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_15, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_16, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_17, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_18, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_19, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_20, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_21, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_22, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_23, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_24, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_25, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_26, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_27, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_28, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_29, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_30, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_31, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_32, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_33, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_34, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_35, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_36, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_37, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_38, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_39, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_40, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_41, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_42, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_43, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_44, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_45, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_46, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_47, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_48, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_49, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_50, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_51, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_52, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_53, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_54, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_55, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_56, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_57, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_58, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_59, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_60, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_61, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_62, // @[Scratchpad.scala:206:16]
input io_acc_write_0_bits_mask_63, // @[Scratchpad.scala:206:16]
input io_acc_write_1_valid, // @[Scratchpad.scala:206:16]
input [8:0] io_acc_write_1_bits_addr, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_1_bits_data_0_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_1_bits_data_1_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_1_bits_data_2_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_1_bits_data_3_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_1_bits_data_4_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_1_bits_data_5_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_1_bits_data_6_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_1_bits_data_7_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_1_bits_data_8_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_1_bits_data_9_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_1_bits_data_10_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_1_bits_data_11_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_1_bits_data_12_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_1_bits_data_13_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_1_bits_data_14_0, // @[Scratchpad.scala:206:16]
input [31:0] io_acc_write_1_bits_data_15_0, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_acc, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_0, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_1, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_2, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_3, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_4, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_5, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_6, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_7, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_8, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_9, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_10, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_11, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_12, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_13, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_14, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_15, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_16, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_17, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_18, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_19, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_20, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_21, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_22, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_23, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_24, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_25, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_26, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_27, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_28, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_29, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_30, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_31, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_32, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_33, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_34, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_35, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_36, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_37, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_38, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_39, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_40, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_41, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_42, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_43, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_44, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_45, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_46, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_47, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_48, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_49, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_50, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_51, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_52, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_53, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_54, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_55, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_56, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_57, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_58, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_59, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_60, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_61, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_62, // @[Scratchpad.scala:206:16]
input io_acc_write_1_bits_mask_63, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_valid, // @[Scratchpad.scala:206:16]
output [39:0] io_tlb_0_req_bits_tlb_req_vaddr, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_debug, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_cease, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_wfi, // @[Scratchpad.scala:206:16]
output [31:0] io_tlb_0_req_bits_status_isa, // @[Scratchpad.scala:206:16]
output [1:0] io_tlb_0_req_bits_status_dprv, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_dv, // @[Scratchpad.scala:206:16]
output [1:0] io_tlb_0_req_bits_status_prv, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_v, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_sd, // @[Scratchpad.scala:206:16]
output [22:0] io_tlb_0_req_bits_status_zero2, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_mpv, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_gva, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_mbe, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_sbe, // @[Scratchpad.scala:206:16]
output [1:0] io_tlb_0_req_bits_status_sxl, // @[Scratchpad.scala:206:16]
output [1:0] io_tlb_0_req_bits_status_uxl, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_sd_rv32, // @[Scratchpad.scala:206:16]
output [7:0] io_tlb_0_req_bits_status_zero1, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_tsr, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_tw, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_tvm, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_mxr, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_sum, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_mprv, // @[Scratchpad.scala:206:16]
output [1:0] io_tlb_0_req_bits_status_xs, // @[Scratchpad.scala:206:16]
output [1:0] io_tlb_0_req_bits_status_fs, // @[Scratchpad.scala:206:16]
output [1:0] io_tlb_0_req_bits_status_mpp, // @[Scratchpad.scala:206:16]
output [1:0] io_tlb_0_req_bits_status_vs, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_spp, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_mpie, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_ube, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_spie, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_upie, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_mie, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_hie, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_sie, // @[Scratchpad.scala:206:16]
output io_tlb_0_req_bits_status_uie, // @[Scratchpad.scala:206:16]
input io_tlb_0_resp_miss, // @[Scratchpad.scala:206:16]
input [31:0] io_tlb_0_resp_paddr, // @[Scratchpad.scala:206:16]
input [39:0] io_tlb_0_resp_gpa, // @[Scratchpad.scala:206:16]
input io_tlb_0_resp_pf_ld, // @[Scratchpad.scala:206:16]
input io_tlb_0_resp_pf_st, // @[Scratchpad.scala:206:16]
input io_tlb_0_resp_pf_inst, // @[Scratchpad.scala:206:16]
input io_tlb_0_resp_ae_ld, // @[Scratchpad.scala:206:16]
input io_tlb_0_resp_ae_st, // @[Scratchpad.scala:206:16]
input io_tlb_0_resp_ae_inst, // @[Scratchpad.scala:206:16]
input io_tlb_0_resp_cacheable, // @[Scratchpad.scala:206:16]
input io_tlb_0_resp_must_alloc, // @[Scratchpad.scala:206:16]
input io_tlb_0_resp_prefetchable, // @[Scratchpad.scala:206:16]
input [4:0] io_tlb_0_resp_cmd, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_valid, // @[Scratchpad.scala:206:16]
output [39:0] io_tlb_1_req_bits_tlb_req_vaddr, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_debug, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_cease, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_wfi, // @[Scratchpad.scala:206:16]
output [31:0] io_tlb_1_req_bits_status_isa, // @[Scratchpad.scala:206:16]
output [1:0] io_tlb_1_req_bits_status_dprv, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_dv, // @[Scratchpad.scala:206:16]
output [1:0] io_tlb_1_req_bits_status_prv, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_v, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_sd, // @[Scratchpad.scala:206:16]
output [22:0] io_tlb_1_req_bits_status_zero2, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_mpv, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_gva, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_mbe, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_sbe, // @[Scratchpad.scala:206:16]
output [1:0] io_tlb_1_req_bits_status_sxl, // @[Scratchpad.scala:206:16]
output [1:0] io_tlb_1_req_bits_status_uxl, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_sd_rv32, // @[Scratchpad.scala:206:16]
output [7:0] io_tlb_1_req_bits_status_zero1, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_tsr, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_tw, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_tvm, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_mxr, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_sum, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_mprv, // @[Scratchpad.scala:206:16]
output [1:0] io_tlb_1_req_bits_status_xs, // @[Scratchpad.scala:206:16]
output [1:0] io_tlb_1_req_bits_status_fs, // @[Scratchpad.scala:206:16]
output [1:0] io_tlb_1_req_bits_status_mpp, // @[Scratchpad.scala:206:16]
output [1:0] io_tlb_1_req_bits_status_vs, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_spp, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_mpie, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_ube, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_spie, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_upie, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_mie, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_hie, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_sie, // @[Scratchpad.scala:206:16]
output io_tlb_1_req_bits_status_uie, // @[Scratchpad.scala:206:16]
input io_tlb_1_resp_miss, // @[Scratchpad.scala:206:16]
input [31:0] io_tlb_1_resp_paddr, // @[Scratchpad.scala:206:16]
input [39:0] io_tlb_1_resp_gpa, // @[Scratchpad.scala:206:16]
input io_tlb_1_resp_pf_ld, // @[Scratchpad.scala:206:16]
input io_tlb_1_resp_pf_st, // @[Scratchpad.scala:206:16]
input io_tlb_1_resp_pf_inst, // @[Scratchpad.scala:206:16]
input io_tlb_1_resp_ae_ld, // @[Scratchpad.scala:206:16]
input io_tlb_1_resp_ae_st, // @[Scratchpad.scala:206:16]
input io_tlb_1_resp_ae_inst, // @[Scratchpad.scala:206:16]
input io_tlb_1_resp_cacheable, // @[Scratchpad.scala:206:16]
input io_tlb_1_resp_must_alloc, // @[Scratchpad.scala:206:16]
input io_tlb_1_resp_prefetchable, // @[Scratchpad.scala:206:16]
input [4:0] io_tlb_1_resp_cmd, // @[Scratchpad.scala:206:16]
output io_busy, // @[Scratchpad.scala:206:16]
input io_flush, // @[Scratchpad.scala:206:16]
output io_counter_event_signal_18, // @[Scratchpad.scala:206:16]
output io_counter_event_signal_19, // @[Scratchpad.scala:206:16]
output io_counter_event_signal_20, // @[Scratchpad.scala:206:16]
output io_counter_event_signal_21, // @[Scratchpad.scala:206:16]
output io_counter_event_signal_22, // @[Scratchpad.scala:206:16]
output io_counter_event_signal_23, // @[Scratchpad.scala:206:16]
output [31:0] io_counter_external_values_4, // @[Scratchpad.scala:206:16]
output [31:0] io_counter_external_values_5, // @[Scratchpad.scala:206:16]
output [31:0] io_counter_external_values_6, // @[Scratchpad.scala:206:16]
output [31:0] io_counter_external_values_7, // @[Scratchpad.scala:206:16]
input io_counter_external_reset // @[Scratchpad.scala:206:16]
);
wire zero_writer_pixel_repeater_io_resp_ready; // @[Scratchpad.scala:782:24, :786:72, :808:74]
wire vsm_1_io_resp_ready; // @[Scratchpad.scala:782:24, :786:72]
wire mvin_scale_pixel_repeater_io_resp_ready; // @[Scratchpad.scala:782:24, :786:72]
wire _mvin_scale_acc_laddr_WIRE_4_is_acc_addr; // @[Scratchpad.scala:732:77]
wire _mvin_scale_acc_laddr_WIRE_4_accumulate; // @[Scratchpad.scala:732:77]
wire _mvin_scale_acc_laddr_WIRE_4_read_full_acc_row; // @[Scratchpad.scala:732:77]
wire [2:0] _mvin_scale_acc_laddr_WIRE_4_norm_cmd; // @[Scratchpad.scala:732:77]
wire [10:0] _mvin_scale_acc_laddr_WIRE_4_garbage; // @[Scratchpad.scala:732:77]
wire _mvin_scale_acc_laddr_WIRE_4_garbage_bit; // @[Scratchpad.scala:732:77]
wire _mvin_scale_acc_laddr_WIRE_is_acc_addr; // @[Scratchpad.scala:732:77]
wire _mvin_scale_acc_laddr_WIRE_accumulate; // @[Scratchpad.scala:732:77]
wire _mvin_scale_acc_laddr_WIRE_read_full_acc_row; // @[Scratchpad.scala:732:77]
wire [2:0] _mvin_scale_acc_laddr_WIRE_norm_cmd; // @[Scratchpad.scala:732:77]
wire [10:0] _mvin_scale_acc_laddr_WIRE_garbage; // @[Scratchpad.scala:732:77]
wire _mvin_scale_acc_laddr_WIRE_garbage_bit; // @[Scratchpad.scala:732:77]
wire write_dispatch_q_q_io_deq_ready; // @[Scratchpad.scala:271:53, :472:23, :475:31, :665:23, :675:31]
wire bank_ios_3_write_en; // @[Scratchpad.scala:452:29]
wire bank_ios_3_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29]
wire [127:0] bank_ios_3_read_resp_bits_data; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_en; // @[Scratchpad.scala:452:29]
wire bank_ios_2_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29]
wire [127:0] bank_ios_2_read_resp_bits_data; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_en; // @[Scratchpad.scala:452:29]
wire bank_ios_1_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29]
wire [127:0] bank_ios_1_read_resp_bits_data; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_en; // @[Scratchpad.scala:452:29]
wire bank_ios_0_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29]
wire [127:0] bank_ios_0_read_resp_bits_data; // @[Scratchpad.scala:452:29]
wire _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_is_acc_addr; // @[Scratchpad.scala:391:89]
wire _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_accumulate; // @[Scratchpad.scala:391:89]
wire _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_read_full_acc_row; // @[Scratchpad.scala:391:89]
wire [2:0] _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_norm_cmd; // @[Scratchpad.scala:391:89]
wire [10:0] _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_garbage; // @[Scratchpad.scala:391:89]
wire _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_garbage_bit; // @[Scratchpad.scala:391:89]
wire widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [127:0] widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire [4:0] widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [1:0] widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [127:0] widget_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9]
wire [15:0] widget_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
wire [4:0] widget_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [127:0] widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire [15:0] widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [4:0] widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [31:0] _acc_adders_io_out_0_0; // @[Scratchpad.scala:628:28]
wire [31:0] _acc_adders_io_out_1_0; // @[Scratchpad.scala:628:28]
wire [31:0] _acc_adders_io_out_2_0; // @[Scratchpad.scala:628:28]
wire [31:0] _acc_adders_io_out_3_0; // @[Scratchpad.scala:628:28]
wire [31:0] _acc_adders_io_out_4_0; // @[Scratchpad.scala:628:28]
wire [31:0] _acc_adders_io_out_5_0; // @[Scratchpad.scala:628:28]
wire [31:0] _acc_adders_io_out_6_0; // @[Scratchpad.scala:628:28]
wire [31:0] _acc_adders_io_out_7_0; // @[Scratchpad.scala:628:28]
wire [31:0] _acc_adders_io_out_8_0; // @[Scratchpad.scala:628:28]
wire [31:0] _acc_adders_io_out_9_0; // @[Scratchpad.scala:628:28]
wire [31:0] _acc_adders_io_out_10_0; // @[Scratchpad.scala:628:28]
wire [31:0] _acc_adders_io_out_11_0; // @[Scratchpad.scala:628:28]
wire [31:0] _acc_adders_io_out_12_0; // @[Scratchpad.scala:628:28]
wire [31:0] _acc_adders_io_out_13_0; // @[Scratchpad.scala:628:28]
wire [31:0] _acc_adders_io_out_14_0; // @[Scratchpad.scala:628:28]
wire [31:0] _acc_adders_io_out_15_0; // @[Scratchpad.scala:628:28]
wire _acc_scale_unit_io_in_ready; // @[Scratchpad.scala:578:32]
wire _acc_scale_unit_io_out_valid; // @[Scratchpad.scala:578:32]
wire [31:0] _acc_scale_unit_io_out_bits_full_data_0_0; // @[Scratchpad.scala:578:32]
wire [31:0] _acc_scale_unit_io_out_bits_full_data_1_0; // @[Scratchpad.scala:578:32]
wire [31:0] _acc_scale_unit_io_out_bits_full_data_2_0; // @[Scratchpad.scala:578:32]
wire [31:0] _acc_scale_unit_io_out_bits_full_data_3_0; // @[Scratchpad.scala:578:32]
wire [31:0] _acc_scale_unit_io_out_bits_full_data_4_0; // @[Scratchpad.scala:578:32]
wire [31:0] _acc_scale_unit_io_out_bits_full_data_5_0; // @[Scratchpad.scala:578:32]
wire [31:0] _acc_scale_unit_io_out_bits_full_data_6_0; // @[Scratchpad.scala:578:32]
wire [31:0] _acc_scale_unit_io_out_bits_full_data_7_0; // @[Scratchpad.scala:578:32]
wire [31:0] _acc_scale_unit_io_out_bits_full_data_8_0; // @[Scratchpad.scala:578:32]
wire [31:0] _acc_scale_unit_io_out_bits_full_data_9_0; // @[Scratchpad.scala:578:32]
wire [31:0] _acc_scale_unit_io_out_bits_full_data_10_0; // @[Scratchpad.scala:578:32]
wire [31:0] _acc_scale_unit_io_out_bits_full_data_11_0; // @[Scratchpad.scala:578:32]
wire [31:0] _acc_scale_unit_io_out_bits_full_data_12_0; // @[Scratchpad.scala:578:32]
wire [31:0] _acc_scale_unit_io_out_bits_full_data_13_0; // @[Scratchpad.scala:578:32]
wire [31:0] _acc_scale_unit_io_out_bits_full_data_14_0; // @[Scratchpad.scala:578:32]
wire [31:0] _acc_scale_unit_io_out_bits_full_data_15_0; // @[Scratchpad.scala:578:32]
wire [7:0] _acc_scale_unit_io_out_bits_data_0_0; // @[Scratchpad.scala:578:32]
wire [7:0] _acc_scale_unit_io_out_bits_data_1_0; // @[Scratchpad.scala:578:32]
wire [7:0] _acc_scale_unit_io_out_bits_data_2_0; // @[Scratchpad.scala:578:32]
wire [7:0] _acc_scale_unit_io_out_bits_data_3_0; // @[Scratchpad.scala:578:32]
wire [7:0] _acc_scale_unit_io_out_bits_data_4_0; // @[Scratchpad.scala:578:32]
wire [7:0] _acc_scale_unit_io_out_bits_data_5_0; // @[Scratchpad.scala:578:32]
wire [7:0] _acc_scale_unit_io_out_bits_data_6_0; // @[Scratchpad.scala:578:32]
wire [7:0] _acc_scale_unit_io_out_bits_data_7_0; // @[Scratchpad.scala:578:32]
wire [7:0] _acc_scale_unit_io_out_bits_data_8_0; // @[Scratchpad.scala:578:32]
wire [7:0] _acc_scale_unit_io_out_bits_data_9_0; // @[Scratchpad.scala:578:32]
wire [7:0] _acc_scale_unit_io_out_bits_data_10_0; // @[Scratchpad.scala:578:32]
wire [7:0] _acc_scale_unit_io_out_bits_data_11_0; // @[Scratchpad.scala:578:32]
wire [7:0] _acc_scale_unit_io_out_bits_data_12_0; // @[Scratchpad.scala:578:32]
wire [7:0] _acc_scale_unit_io_out_bits_data_13_0; // @[Scratchpad.scala:578:32]
wire [7:0] _acc_scale_unit_io_out_bits_data_14_0; // @[Scratchpad.scala:578:32]
wire [7:0] _acc_scale_unit_io_out_bits_data_15_0; // @[Scratchpad.scala:578:32]
wire [1:0] _acc_scale_unit_io_out_bits_acc_bank_id; // @[Scratchpad.scala:578:32]
wire _acc_scale_unit_io_out_bits_fromDMA; // @[Scratchpad.scala:578:32]
wire _norm_unit_passthru_q_io_enq_ready; // @[Normalizer.scala:808:38]
wire _dma_read_pipe_p_3_io_out_valid; // @[Pipeline.scala:75:19]
wire [127:0] _dma_read_pipe_p_3_io_out_bits_data; // @[Pipeline.scala:75:19]
wire _dma_read_pipe_p_2_io_out_valid; // @[Pipeline.scala:75:19]
wire [127:0] _dma_read_pipe_p_2_io_out_bits_data; // @[Pipeline.scala:75:19]
wire _dma_read_pipe_p_1_io_out_valid; // @[Pipeline.scala:75:19]
wire [127:0] _dma_read_pipe_p_1_io_out_bits_data; // @[Pipeline.scala:75:19]
wire _dma_read_pipe_p_io_out_valid; // @[Pipeline.scala:75:19]
wire [127:0] _dma_read_pipe_p_io_out_bits_data; // @[Pipeline.scala:75:19]
wire _mvin_scale_pixel_repeater_io_req_ready; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_valid; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_out_0; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_out_1; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_out_2; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_out_3; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_out_4; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_out_5; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_out_6; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_out_7; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_out_8; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_out_9; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_out_10; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_out_11; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_out_12; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_out_13; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_out_14; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_out_15; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_mask_0; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_mask_1; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_mask_2; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_mask_3; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_mask_4; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_mask_5; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_mask_6; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_mask_7; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_mask_8; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_mask_9; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_mask_10; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_mask_11; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_mask_12; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_mask_13; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_mask_14; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_mask_15; // @[Scratchpad.scala:387:43]
wire [13:0] _mvin_scale_pixel_repeater_io_resp_bits_laddr_data; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_last; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_tag_is_acc; // @[Scratchpad.scala:387:43]
wire _mvin_scale_pixel_repeater_io_resp_bits_tag_accumulate; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_tag_bytes_read; // @[Scratchpad.scala:387:43]
wire [7:0] _mvin_scale_pixel_repeater_io_resp_bits_tag_cmd_id; // @[Scratchpad.scala:387:43]
wire _vsm_in_q_1_io_enq_ready; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_valid; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_in_0; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_in_1; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_in_2; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_in_3; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_in_4; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_in_5; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_in_6; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_in_7; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_in_8; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_in_9; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_in_10; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_in_11; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_in_12; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_in_13; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_in_14; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_in_15; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_scale; // @[VectorScalarMultiplier.scala:201:26]
wire [15:0] _vsm_in_q_1_io_deq_bits_repeats; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_1_io_deq_bits_pixel_repeats; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_last; // @[VectorScalarMultiplier.scala:201:26]
wire [511:0] _vsm_in_q_1_io_deq_bits_tag_data; // @[VectorScalarMultiplier.scala:201:26]
wire [13:0] _vsm_in_q_1_io_deq_bits_tag_addr; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_0; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_1; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_2; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_3; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_4; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_5; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_6; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_7; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_8; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_9; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_10; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_11; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_12; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_13; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_14; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_15; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_16; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_17; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_18; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_19; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_20; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_21; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_22; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_23; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_24; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_25; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_26; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_27; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_28; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_29; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_30; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_31; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_32; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_33; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_34; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_35; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_36; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_37; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_38; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_39; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_40; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_41; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_42; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_43; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_44; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_45; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_46; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_47; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_48; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_49; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_50; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_51; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_52; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_53; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_54; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_55; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_56; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_57; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_58; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_59; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_60; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_61; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_62; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_mask_63; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_is_acc; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_accumulate; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_has_acc_bitwidth; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_1_io_deq_bits_tag_scale; // @[VectorScalarMultiplier.scala:201:26]
wire [15:0] _vsm_in_q_1_io_deq_bits_tag_repeats; // @[VectorScalarMultiplier.scala:201:26]
wire [15:0] _vsm_in_q_1_io_deq_bits_tag_pixel_repeats; // @[VectorScalarMultiplier.scala:201:26]
wire [15:0] _vsm_in_q_1_io_deq_bits_tag_len; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_1_io_deq_bits_tag_last; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_1_io_deq_bits_tag_bytes_read; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_1_io_deq_bits_tag_cmd_id; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_1_io_req_ready; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_valid; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_1_io_resp_bits_out_0; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_1_io_resp_bits_out_1; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_1_io_resp_bits_out_2; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_1_io_resp_bits_out_3; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_1_io_resp_bits_out_4; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_1_io_resp_bits_out_5; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_1_io_resp_bits_out_6; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_1_io_resp_bits_out_7; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_1_io_resp_bits_out_8; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_1_io_resp_bits_out_9; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_1_io_resp_bits_out_10; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_1_io_resp_bits_out_11; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_1_io_resp_bits_out_12; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_1_io_resp_bits_out_13; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_1_io_resp_bits_out_14; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_1_io_resp_bits_out_15; // @[VectorScalarMultiplier.scala:200:21]
wire [15:0] _vsm_1_io_resp_bits_row; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_last; // @[VectorScalarMultiplier.scala:200:21]
wire [13:0] _vsm_1_io_resp_bits_tag_addr; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_0; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_1; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_2; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_3; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_4; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_5; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_6; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_7; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_8; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_9; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_10; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_11; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_12; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_13; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_14; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_15; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_16; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_17; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_18; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_19; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_20; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_21; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_22; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_23; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_24; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_25; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_26; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_27; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_28; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_29; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_30; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_31; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_32; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_33; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_34; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_35; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_36; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_37; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_38; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_39; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_40; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_41; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_42; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_43; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_44; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_45; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_46; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_47; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_48; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_49; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_50; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_51; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_52; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_53; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_54; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_55; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_56; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_57; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_58; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_59; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_60; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_61; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_62; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_mask_63; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_is_acc; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_1_io_resp_bits_tag_accumulate; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_1_io_resp_bits_tag_bytes_read; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_1_io_resp_bits_tag_cmd_id; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_in_q_io_enq_ready; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_valid; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_in_0; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_in_1; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_in_2; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_in_3; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_in_4; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_in_5; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_in_6; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_in_7; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_in_8; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_in_9; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_in_10; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_in_11; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_in_12; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_in_13; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_in_14; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_in_15; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_io_deq_bits_scale_bits; // @[VectorScalarMultiplier.scala:201:26]
wire [15:0] _vsm_in_q_io_deq_bits_repeats; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_pixel_repeats; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_last; // @[VectorScalarMultiplier.scala:201:26]
wire [511:0] _vsm_in_q_io_deq_bits_tag_data; // @[VectorScalarMultiplier.scala:201:26]
wire [13:0] _vsm_in_q_io_deq_bits_tag_addr; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_0; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_1; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_2; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_3; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_4; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_5; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_6; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_7; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_8; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_9; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_10; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_11; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_12; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_13; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_14; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_15; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_16; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_17; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_18; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_19; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_20; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_21; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_22; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_23; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_24; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_25; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_26; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_27; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_28; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_29; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_30; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_31; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_32; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_33; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_34; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_35; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_36; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_37; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_38; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_39; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_40; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_41; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_42; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_43; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_44; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_45; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_46; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_47; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_48; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_49; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_50; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_51; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_52; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_53; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_54; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_55; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_56; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_57; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_58; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_59; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_60; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_61; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_62; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_mask_63; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_is_acc; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_accumulate; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_has_acc_bitwidth; // @[VectorScalarMultiplier.scala:201:26]
wire [31:0] _vsm_in_q_io_deq_bits_tag_scale; // @[VectorScalarMultiplier.scala:201:26]
wire [15:0] _vsm_in_q_io_deq_bits_tag_repeats; // @[VectorScalarMultiplier.scala:201:26]
wire [15:0] _vsm_in_q_io_deq_bits_tag_pixel_repeats; // @[VectorScalarMultiplier.scala:201:26]
wire [15:0] _vsm_in_q_io_deq_bits_tag_len; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_in_q_io_deq_bits_tag_last; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_tag_bytes_read; // @[VectorScalarMultiplier.scala:201:26]
wire [7:0] _vsm_in_q_io_deq_bits_tag_cmd_id; // @[VectorScalarMultiplier.scala:201:26]
wire _vsm_io_req_ready; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_valid; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_out_0; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_out_1; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_out_2; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_out_3; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_out_4; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_out_5; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_out_6; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_out_7; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_out_8; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_out_9; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_out_10; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_out_11; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_out_12; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_out_13; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_out_14; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_out_15; // @[VectorScalarMultiplier.scala:200:21]
wire [15:0] _vsm_io_resp_bits_row; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_last; // @[VectorScalarMultiplier.scala:200:21]
wire [511:0] _vsm_io_resp_bits_tag_data; // @[VectorScalarMultiplier.scala:200:21]
wire [13:0] _vsm_io_resp_bits_tag_addr; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_0; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_1; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_2; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_3; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_4; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_5; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_6; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_7; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_8; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_9; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_10; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_11; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_12; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_13; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_14; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_15; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_16; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_17; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_18; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_19; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_20; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_21; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_22; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_23; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_24; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_25; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_26; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_27; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_28; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_29; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_30; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_31; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_32; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_33; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_34; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_35; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_36; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_37; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_38; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_39; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_40; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_41; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_42; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_43; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_44; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_45; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_46; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_47; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_48; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_49; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_50; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_51; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_52; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_53; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_54; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_55; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_56; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_57; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_58; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_59; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_60; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_61; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_62; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_mask_63; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_is_acc; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_accumulate; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_has_acc_bitwidth; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _vsm_io_resp_bits_tag_scale; // @[VectorScalarMultiplier.scala:200:21]
wire [15:0] _vsm_io_resp_bits_tag_repeats; // @[VectorScalarMultiplier.scala:200:21]
wire [15:0] _vsm_io_resp_bits_tag_pixel_repeats; // @[VectorScalarMultiplier.scala:200:21]
wire [15:0] _vsm_io_resp_bits_tag_len; // @[VectorScalarMultiplier.scala:200:21]
wire _vsm_io_resp_bits_tag_last; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_tag_bytes_read; // @[VectorScalarMultiplier.scala:200:21]
wire [7:0] _vsm_io_resp_bits_tag_cmd_id; // @[VectorScalarMultiplier.scala:200:21]
wire _zero_writer_pixel_repeater_io_req_ready; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_valid; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_mask_0; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_mask_1; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_mask_2; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_mask_3; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_mask_4; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_mask_5; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_mask_6; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_mask_7; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_mask_8; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_mask_9; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_mask_10; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_mask_11; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_mask_12; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_mask_13; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_mask_14; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_mask_15; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_laddr_is_acc_addr; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_laddr_accumulate; // @[Scratchpad.scala:330:44]
wire [13:0] _zero_writer_pixel_repeater_io_resp_bits_laddr_data; // @[Scratchpad.scala:330:44]
wire _zero_writer_pixel_repeater_io_resp_bits_last; // @[Scratchpad.scala:330:44]
wire [15:0] _zero_writer_pixel_repeater_io_resp_bits_tag_cols; // @[Scratchpad.scala:330:44]
wire [7:0] _zero_writer_pixel_repeater_io_resp_bits_tag_cmd_id; // @[Scratchpad.scala:330:44]
wire _zero_writer_io_req_ready; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_valid; // @[Scratchpad.scala:317:29]
wire [39:0] _zero_writer_io_resp_bits_tag_vaddr; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_laddr_is_acc_addr; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_laddr_accumulate; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_laddr_read_full_acc_row; // @[Scratchpad.scala:317:29]
wire [2:0] _zero_writer_io_resp_bits_tag_laddr_norm_cmd; // @[Scratchpad.scala:317:29]
wire [10:0] _zero_writer_io_resp_bits_tag_laddr_garbage; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_laddr_garbage_bit; // @[Scratchpad.scala:317:29]
wire [13:0] _zero_writer_io_resp_bits_tag_laddr_data; // @[Scratchpad.scala:317:29]
wire [15:0] _zero_writer_io_resp_bits_tag_cols; // @[Scratchpad.scala:317:29]
wire [15:0] _zero_writer_io_resp_bits_tag_repeats; // @[Scratchpad.scala:317:29]
wire [31:0] _zero_writer_io_resp_bits_tag_scale; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_has_acc_bitwidth; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_all_zeros; // @[Scratchpad.scala:317:29]
wire [15:0] _zero_writer_io_resp_bits_tag_block_stride; // @[Scratchpad.scala:317:29]
wire [7:0] _zero_writer_io_resp_bits_tag_pixel_repeats; // @[Scratchpad.scala:317:29]
wire [7:0] _zero_writer_io_resp_bits_tag_cmd_id; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_debug; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_cease; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_wfi; // @[Scratchpad.scala:317:29]
wire [31:0] _zero_writer_io_resp_bits_tag_status_isa; // @[Scratchpad.scala:317:29]
wire [1:0] _zero_writer_io_resp_bits_tag_status_dprv; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_dv; // @[Scratchpad.scala:317:29]
wire [1:0] _zero_writer_io_resp_bits_tag_status_prv; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_v; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_sd; // @[Scratchpad.scala:317:29]
wire [22:0] _zero_writer_io_resp_bits_tag_status_zero2; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_mpv; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_gva; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_mbe; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_sbe; // @[Scratchpad.scala:317:29]
wire [1:0] _zero_writer_io_resp_bits_tag_status_sxl; // @[Scratchpad.scala:317:29]
wire [1:0] _zero_writer_io_resp_bits_tag_status_uxl; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_sd_rv32; // @[Scratchpad.scala:317:29]
wire [7:0] _zero_writer_io_resp_bits_tag_status_zero1; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_tsr; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_tw; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_tvm; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_mxr; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_sum; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_mprv; // @[Scratchpad.scala:317:29]
wire [1:0] _zero_writer_io_resp_bits_tag_status_xs; // @[Scratchpad.scala:317:29]
wire [1:0] _zero_writer_io_resp_bits_tag_status_fs; // @[Scratchpad.scala:317:29]
wire [1:0] _zero_writer_io_resp_bits_tag_status_mpp; // @[Scratchpad.scala:317:29]
wire [1:0] _zero_writer_io_resp_bits_tag_status_vs; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_spp; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_mpie; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_ube; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_spie; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_upie; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_mie; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_hie; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_sie; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_tag_status_uie; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_laddr_is_acc_addr; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_laddr_accumulate; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_laddr_read_full_acc_row; // @[Scratchpad.scala:317:29]
wire [2:0] _zero_writer_io_resp_bits_laddr_norm_cmd; // @[Scratchpad.scala:317:29]
wire [10:0] _zero_writer_io_resp_bits_laddr_garbage; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_laddr_garbage_bit; // @[Scratchpad.scala:317:29]
wire [13:0] _zero_writer_io_resp_bits_laddr_data; // @[Scratchpad.scala:317:29]
wire _zero_writer_io_resp_bits_last; // @[Scratchpad.scala:317:29]
wire _read_issue_q_io_enq_ready; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_valid; // @[Scratchpad.scala:255:30]
wire [39:0] _read_issue_q_io_deq_bits_vaddr; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_laddr_is_acc_addr; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_laddr_accumulate; // @[Scratchpad.scala:255:30]
wire [13:0] _read_issue_q_io_deq_bits_laddr_data; // @[Scratchpad.scala:255:30]
wire [15:0] _read_issue_q_io_deq_bits_cols; // @[Scratchpad.scala:255:30]
wire [15:0] _read_issue_q_io_deq_bits_repeats; // @[Scratchpad.scala:255:30]
wire [31:0] _read_issue_q_io_deq_bits_scale; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_has_acc_bitwidth; // @[Scratchpad.scala:255:30]
wire [15:0] _read_issue_q_io_deq_bits_block_stride; // @[Scratchpad.scala:255:30]
wire [7:0] _read_issue_q_io_deq_bits_pixel_repeats; // @[Scratchpad.scala:255:30]
wire [7:0] _read_issue_q_io_deq_bits_cmd_id; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_debug; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_cease; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_wfi; // @[Scratchpad.scala:255:30]
wire [31:0] _read_issue_q_io_deq_bits_status_isa; // @[Scratchpad.scala:255:30]
wire [1:0] _read_issue_q_io_deq_bits_status_dprv; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_dv; // @[Scratchpad.scala:255:30]
wire [1:0] _read_issue_q_io_deq_bits_status_prv; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_v; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_sd; // @[Scratchpad.scala:255:30]
wire [22:0] _read_issue_q_io_deq_bits_status_zero2; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_mpv; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_gva; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_mbe; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_sbe; // @[Scratchpad.scala:255:30]
wire [1:0] _read_issue_q_io_deq_bits_status_sxl; // @[Scratchpad.scala:255:30]
wire [1:0] _read_issue_q_io_deq_bits_status_uxl; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_sd_rv32; // @[Scratchpad.scala:255:30]
wire [7:0] _read_issue_q_io_deq_bits_status_zero1; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_tsr; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_tw; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_tvm; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_mxr; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_sum; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_mprv; // @[Scratchpad.scala:255:30]
wire [1:0] _read_issue_q_io_deq_bits_status_xs; // @[Scratchpad.scala:255:30]
wire [1:0] _read_issue_q_io_deq_bits_status_fs; // @[Scratchpad.scala:255:30]
wire [1:0] _read_issue_q_io_deq_bits_status_mpp; // @[Scratchpad.scala:255:30]
wire [1:0] _read_issue_q_io_deq_bits_status_vs; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_spp; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_mpie; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_ube; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_spie; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_upie; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_mie; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_hie; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_sie; // @[Scratchpad.scala:255:30]
wire _read_issue_q_io_deq_bits_status_uie; // @[Scratchpad.scala:255:30]
wire _write_issue_q_io_enq_ready; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_valid; // @[Scratchpad.scala:254:31]
wire [39:0] _write_issue_q_io_deq_bits_vaddr; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_laddr_is_acc_addr; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_laddr_accumulate; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_laddr_read_full_acc_row; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_laddr_garbage_bit; // @[Scratchpad.scala:254:31]
wire [13:0] _write_issue_q_io_deq_bits_laddr_data; // @[Scratchpad.scala:254:31]
wire [15:0] _write_issue_q_io_deq_bits_len; // @[Scratchpad.scala:254:31]
wire [7:0] _write_issue_q_io_deq_bits_block; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_debug; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_cease; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_wfi; // @[Scratchpad.scala:254:31]
wire [31:0] _write_issue_q_io_deq_bits_status_isa; // @[Scratchpad.scala:254:31]
wire [1:0] _write_issue_q_io_deq_bits_status_dprv; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_dv; // @[Scratchpad.scala:254:31]
wire [1:0] _write_issue_q_io_deq_bits_status_prv; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_v; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_sd; // @[Scratchpad.scala:254:31]
wire [22:0] _write_issue_q_io_deq_bits_status_zero2; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_mpv; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_gva; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_mbe; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_sbe; // @[Scratchpad.scala:254:31]
wire [1:0] _write_issue_q_io_deq_bits_status_sxl; // @[Scratchpad.scala:254:31]
wire [1:0] _write_issue_q_io_deq_bits_status_uxl; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_sd_rv32; // @[Scratchpad.scala:254:31]
wire [7:0] _write_issue_q_io_deq_bits_status_zero1; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_tsr; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_tw; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_tvm; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_mxr; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_sum; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_mprv; // @[Scratchpad.scala:254:31]
wire [1:0] _write_issue_q_io_deq_bits_status_xs; // @[Scratchpad.scala:254:31]
wire [1:0] _write_issue_q_io_deq_bits_status_fs; // @[Scratchpad.scala:254:31]
wire [1:0] _write_issue_q_io_deq_bits_status_mpp; // @[Scratchpad.scala:254:31]
wire [1:0] _write_issue_q_io_deq_bits_status_vs; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_spp; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_mpie; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_ube; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_spie; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_upie; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_mie; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_hie; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_sie; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_status_uie; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_pool_en; // @[Scratchpad.scala:254:31]
wire _write_issue_q_io_deq_bits_store_en; // @[Scratchpad.scala:254:31]
wire _write_scale_q_io_enq_ready; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_valid; // @[Scratchpad.scala:253:31]
wire [39:0] _write_scale_q_io_deq_bits_vaddr; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_laddr_is_acc_addr; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_laddr_accumulate; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_laddr_read_full_acc_row; // @[Scratchpad.scala:253:31]
wire [2:0] _write_scale_q_io_deq_bits_laddr_norm_cmd; // @[Scratchpad.scala:253:31]
wire [10:0] _write_scale_q_io_deq_bits_laddr_garbage; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_laddr_garbage_bit; // @[Scratchpad.scala:253:31]
wire [13:0] _write_scale_q_io_deq_bits_laddr_data; // @[Scratchpad.scala:253:31]
wire [2:0] _write_scale_q_io_deq_bits_acc_act; // @[Scratchpad.scala:253:31]
wire [31:0] _write_scale_q_io_deq_bits_acc_scale; // @[Scratchpad.scala:253:31]
wire [31:0] _write_scale_q_io_deq_bits_acc_igelu_qb; // @[Scratchpad.scala:253:31]
wire [31:0] _write_scale_q_io_deq_bits_acc_igelu_qc; // @[Scratchpad.scala:253:31]
wire [31:0] _write_scale_q_io_deq_bits_acc_iexp_qln2; // @[Scratchpad.scala:253:31]
wire [31:0] _write_scale_q_io_deq_bits_acc_iexp_qln2_inv; // @[Scratchpad.scala:253:31]
wire [7:0] _write_scale_q_io_deq_bits_acc_norm_stats_id; // @[Scratchpad.scala:253:31]
wire [15:0] _write_scale_q_io_deq_bits_len; // @[Scratchpad.scala:253:31]
wire [7:0] _write_scale_q_io_deq_bits_block; // @[Scratchpad.scala:253:31]
wire [7:0] _write_scale_q_io_deq_bits_cmd_id; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_debug; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_cease; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_wfi; // @[Scratchpad.scala:253:31]
wire [31:0] _write_scale_q_io_deq_bits_status_isa; // @[Scratchpad.scala:253:31]
wire [1:0] _write_scale_q_io_deq_bits_status_dprv; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_dv; // @[Scratchpad.scala:253:31]
wire [1:0] _write_scale_q_io_deq_bits_status_prv; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_v; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_sd; // @[Scratchpad.scala:253:31]
wire [22:0] _write_scale_q_io_deq_bits_status_zero2; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_mpv; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_gva; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_mbe; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_sbe; // @[Scratchpad.scala:253:31]
wire [1:0] _write_scale_q_io_deq_bits_status_sxl; // @[Scratchpad.scala:253:31]
wire [1:0] _write_scale_q_io_deq_bits_status_uxl; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_sd_rv32; // @[Scratchpad.scala:253:31]
wire [7:0] _write_scale_q_io_deq_bits_status_zero1; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_tsr; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_tw; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_tvm; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_mxr; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_sum; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_mprv; // @[Scratchpad.scala:253:31]
wire [1:0] _write_scale_q_io_deq_bits_status_xs; // @[Scratchpad.scala:253:31]
wire [1:0] _write_scale_q_io_deq_bits_status_fs; // @[Scratchpad.scala:253:31]
wire [1:0] _write_scale_q_io_deq_bits_status_mpp; // @[Scratchpad.scala:253:31]
wire [1:0] _write_scale_q_io_deq_bits_status_vs; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_spp; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_mpie; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_ube; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_spie; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_upie; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_mie; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_hie; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_sie; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_status_uie; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_pool_en; // @[Scratchpad.scala:253:31]
wire _write_scale_q_io_deq_bits_store_en; // @[Scratchpad.scala:253:31]
wire _write_norm_q_io_enq_ready; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_valid; // @[Scratchpad.scala:252:30]
wire [39:0] _write_norm_q_io_deq_bits_vaddr; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_laddr_is_acc_addr; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_laddr_accumulate; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_laddr_read_full_acc_row; // @[Scratchpad.scala:252:30]
wire [2:0] _write_norm_q_io_deq_bits_laddr_norm_cmd; // @[Scratchpad.scala:252:30]
wire [10:0] _write_norm_q_io_deq_bits_laddr_garbage; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_laddr_garbage_bit; // @[Scratchpad.scala:252:30]
wire [13:0] _write_norm_q_io_deq_bits_laddr_data; // @[Scratchpad.scala:252:30]
wire [2:0] _write_norm_q_io_deq_bits_acc_act; // @[Scratchpad.scala:252:30]
wire [31:0] _write_norm_q_io_deq_bits_acc_scale; // @[Scratchpad.scala:252:30]
wire [31:0] _write_norm_q_io_deq_bits_acc_igelu_qb; // @[Scratchpad.scala:252:30]
wire [31:0] _write_norm_q_io_deq_bits_acc_igelu_qc; // @[Scratchpad.scala:252:30]
wire [31:0] _write_norm_q_io_deq_bits_acc_iexp_qln2; // @[Scratchpad.scala:252:30]
wire [31:0] _write_norm_q_io_deq_bits_acc_iexp_qln2_inv; // @[Scratchpad.scala:252:30]
wire [7:0] _write_norm_q_io_deq_bits_acc_norm_stats_id; // @[Scratchpad.scala:252:30]
wire [15:0] _write_norm_q_io_deq_bits_len; // @[Scratchpad.scala:252:30]
wire [7:0] _write_norm_q_io_deq_bits_block; // @[Scratchpad.scala:252:30]
wire [7:0] _write_norm_q_io_deq_bits_cmd_id; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_debug; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_cease; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_wfi; // @[Scratchpad.scala:252:30]
wire [31:0] _write_norm_q_io_deq_bits_status_isa; // @[Scratchpad.scala:252:30]
wire [1:0] _write_norm_q_io_deq_bits_status_dprv; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_dv; // @[Scratchpad.scala:252:30]
wire [1:0] _write_norm_q_io_deq_bits_status_prv; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_v; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_sd; // @[Scratchpad.scala:252:30]
wire [22:0] _write_norm_q_io_deq_bits_status_zero2; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_mpv; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_gva; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_mbe; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_sbe; // @[Scratchpad.scala:252:30]
wire [1:0] _write_norm_q_io_deq_bits_status_sxl; // @[Scratchpad.scala:252:30]
wire [1:0] _write_norm_q_io_deq_bits_status_uxl; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_sd_rv32; // @[Scratchpad.scala:252:30]
wire [7:0] _write_norm_q_io_deq_bits_status_zero1; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_tsr; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_tw; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_tvm; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_mxr; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_sum; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_mprv; // @[Scratchpad.scala:252:30]
wire [1:0] _write_norm_q_io_deq_bits_status_xs; // @[Scratchpad.scala:252:30]
wire [1:0] _write_norm_q_io_deq_bits_status_fs; // @[Scratchpad.scala:252:30]
wire [1:0] _write_norm_q_io_deq_bits_status_mpp; // @[Scratchpad.scala:252:30]
wire [1:0] _write_norm_q_io_deq_bits_status_vs; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_spp; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_mpie; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_ube; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_spie; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_upie; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_mie; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_hie; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_sie; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_status_uie; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_pool_en; // @[Scratchpad.scala:252:30]
wire _write_norm_q_io_deq_bits_store_en; // @[Scratchpad.scala:252:30]
wire _write_dispatch_q_q_io_deq_valid; // @[Decoupled.scala:362:21]
wire [39:0] _write_dispatch_q_q_io_deq_bits_vaddr; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_laddr_is_acc_addr; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_laddr_accumulate; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_laddr_read_full_acc_row; // @[Decoupled.scala:362:21]
wire [2:0] _write_dispatch_q_q_io_deq_bits_laddr_norm_cmd; // @[Decoupled.scala:362:21]
wire [10:0] _write_dispatch_q_q_io_deq_bits_laddr_garbage; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_laddr_garbage_bit; // @[Decoupled.scala:362:21]
wire [13:0] _write_dispatch_q_q_io_deq_bits_laddr_data; // @[Decoupled.scala:362:21]
wire [2:0] _write_dispatch_q_q_io_deq_bits_acc_act; // @[Decoupled.scala:362:21]
wire [31:0] _write_dispatch_q_q_io_deq_bits_acc_scale; // @[Decoupled.scala:362:21]
wire [31:0] _write_dispatch_q_q_io_deq_bits_acc_igelu_qb; // @[Decoupled.scala:362:21]
wire [31:0] _write_dispatch_q_q_io_deq_bits_acc_igelu_qc; // @[Decoupled.scala:362:21]
wire [31:0] _write_dispatch_q_q_io_deq_bits_acc_iexp_qln2; // @[Decoupled.scala:362:21]
wire [31:0] _write_dispatch_q_q_io_deq_bits_acc_iexp_qln2_inv; // @[Decoupled.scala:362:21]
wire [7:0] _write_dispatch_q_q_io_deq_bits_acc_norm_stats_id; // @[Decoupled.scala:362:21]
wire [15:0] _write_dispatch_q_q_io_deq_bits_len; // @[Decoupled.scala:362:21]
wire [7:0] _write_dispatch_q_q_io_deq_bits_block; // @[Decoupled.scala:362:21]
wire [7:0] _write_dispatch_q_q_io_deq_bits_cmd_id; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_debug; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_cease; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_wfi; // @[Decoupled.scala:362:21]
wire [31:0] _write_dispatch_q_q_io_deq_bits_status_isa; // @[Decoupled.scala:362:21]
wire [1:0] _write_dispatch_q_q_io_deq_bits_status_dprv; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_dv; // @[Decoupled.scala:362:21]
wire [1:0] _write_dispatch_q_q_io_deq_bits_status_prv; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_v; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_sd; // @[Decoupled.scala:362:21]
wire [22:0] _write_dispatch_q_q_io_deq_bits_status_zero2; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_mpv; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_gva; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_mbe; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_sbe; // @[Decoupled.scala:362:21]
wire [1:0] _write_dispatch_q_q_io_deq_bits_status_sxl; // @[Decoupled.scala:362:21]
wire [1:0] _write_dispatch_q_q_io_deq_bits_status_uxl; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_sd_rv32; // @[Decoupled.scala:362:21]
wire [7:0] _write_dispatch_q_q_io_deq_bits_status_zero1; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_tsr; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_tw; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_tvm; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_mxr; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_sum; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_mprv; // @[Decoupled.scala:362:21]
wire [1:0] _write_dispatch_q_q_io_deq_bits_status_xs; // @[Decoupled.scala:362:21]
wire [1:0] _write_dispatch_q_q_io_deq_bits_status_fs; // @[Decoupled.scala:362:21]
wire [1:0] _write_dispatch_q_q_io_deq_bits_status_mpp; // @[Decoupled.scala:362:21]
wire [1:0] _write_dispatch_q_q_io_deq_bits_status_vs; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_spp; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_mpie; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_ube; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_spie; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_upie; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_mie; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_hie; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_sie; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_status_uie; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_pool_en; // @[Decoupled.scala:362:21]
wire _write_dispatch_q_q_io_deq_bits_store_en; // @[Decoupled.scala:362:21]
wire _buffer_2_auto_in_a_ready; // @[Buffer.scala:75:28]
wire _buffer_2_auto_in_d_valid; // @[Buffer.scala:75:28]
wire [2:0] _buffer_2_auto_in_d_bits_opcode; // @[Buffer.scala:75:28]
wire [1:0] _buffer_2_auto_in_d_bits_param; // @[Buffer.scala:75:28]
wire [3:0] _buffer_2_auto_in_d_bits_size; // @[Buffer.scala:75:28]
wire [4:0] _buffer_2_auto_in_d_bits_source; // @[Buffer.scala:75:28]
wire [3:0] _buffer_2_auto_in_d_bits_sink; // @[Buffer.scala:75:28]
wire _buffer_2_auto_in_d_bits_denied; // @[Buffer.scala:75:28]
wire [127:0] _buffer_2_auto_in_d_bits_data; // @[Buffer.scala:75:28]
wire _buffer_2_auto_in_d_bits_corrupt; // @[Buffer.scala:75:28]
wire _buffer_1_auto_in_a_ready; // @[Buffer.scala:75:28]
wire _buffer_1_auto_in_d_valid; // @[Buffer.scala:75:28]
wire [2:0] _buffer_1_auto_in_d_bits_opcode; // @[Buffer.scala:75:28]
wire [1:0] _buffer_1_auto_in_d_bits_param; // @[Buffer.scala:75:28]
wire [3:0] _buffer_1_auto_in_d_bits_size; // @[Buffer.scala:75:28]
wire [3:0] _buffer_1_auto_in_d_bits_source; // @[Buffer.scala:75:28]
wire [3:0] _buffer_1_auto_in_d_bits_sink; // @[Buffer.scala:75:28]
wire _buffer_1_auto_in_d_bits_denied; // @[Buffer.scala:75:28]
wire [127:0] _buffer_1_auto_in_d_bits_data; // @[Buffer.scala:75:28]
wire _buffer_1_auto_in_d_bits_corrupt; // @[Buffer.scala:75:28]
wire _buffer_1_auto_out_a_valid; // @[Buffer.scala:75:28]
wire [2:0] _buffer_1_auto_out_a_bits_opcode; // @[Buffer.scala:75:28]
wire [2:0] _buffer_1_auto_out_a_bits_param; // @[Buffer.scala:75:28]
wire [3:0] _buffer_1_auto_out_a_bits_size; // @[Buffer.scala:75:28]
wire [3:0] _buffer_1_auto_out_a_bits_source; // @[Buffer.scala:75:28]
wire [31:0] _buffer_1_auto_out_a_bits_address; // @[Buffer.scala:75:28]
wire [15:0] _buffer_1_auto_out_a_bits_mask; // @[Buffer.scala:75:28]
wire [127:0] _buffer_1_auto_out_a_bits_data; // @[Buffer.scala:75:28]
wire _buffer_1_auto_out_a_bits_corrupt; // @[Buffer.scala:75:28]
wire _buffer_1_auto_out_d_ready; // @[Buffer.scala:75:28]
wire _buffer_auto_in_a_ready; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_valid; // @[Buffer.scala:75:28]
wire [2:0] _buffer_auto_in_d_bits_opcode; // @[Buffer.scala:75:28]
wire [1:0] _buffer_auto_in_d_bits_param; // @[Buffer.scala:75:28]
wire [3:0] _buffer_auto_in_d_bits_size; // @[Buffer.scala:75:28]
wire [3:0] _buffer_auto_in_d_bits_source; // @[Buffer.scala:75:28]
wire [3:0] _buffer_auto_in_d_bits_sink; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_bits_denied; // @[Buffer.scala:75:28]
wire [127:0] _buffer_auto_in_d_bits_data; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_bits_corrupt; // @[Buffer.scala:75:28]
wire _buffer_auto_out_a_valid; // @[Buffer.scala:75:28]
wire [2:0] _buffer_auto_out_a_bits_opcode; // @[Buffer.scala:75:28]
wire [2:0] _buffer_auto_out_a_bits_param; // @[Buffer.scala:75:28]
wire [3:0] _buffer_auto_out_a_bits_size; // @[Buffer.scala:75:28]
wire [3:0] _buffer_auto_out_a_bits_source; // @[Buffer.scala:75:28]
wire [31:0] _buffer_auto_out_a_bits_address; // @[Buffer.scala:75:28]
wire [15:0] _buffer_auto_out_a_bits_mask; // @[Buffer.scala:75:28]
wire [127:0] _buffer_auto_out_a_bits_data; // @[Buffer.scala:75:28]
wire _buffer_auto_out_a_bits_corrupt; // @[Buffer.scala:75:28]
wire _buffer_auto_out_d_ready; // @[Buffer.scala:75:28]
wire _writer_auto_out_a_valid; // @[Scratchpad.scala:192:26]
wire [2:0] _writer_auto_out_a_bits_opcode; // @[Scratchpad.scala:192:26]
wire [2:0] _writer_auto_out_a_bits_param; // @[Scratchpad.scala:192:26]
wire [3:0] _writer_auto_out_a_bits_size; // @[Scratchpad.scala:192:26]
wire [3:0] _writer_auto_out_a_bits_source; // @[Scratchpad.scala:192:26]
wire [31:0] _writer_auto_out_a_bits_address; // @[Scratchpad.scala:192:26]
wire [15:0] _writer_auto_out_a_bits_mask; // @[Scratchpad.scala:192:26]
wire [127:0] _writer_auto_out_a_bits_data; // @[Scratchpad.scala:192:26]
wire _writer_auto_out_a_bits_corrupt; // @[Scratchpad.scala:192:26]
wire _writer_auto_out_d_ready; // @[Scratchpad.scala:192:26]
wire _writer_io_req_ready; // @[Scratchpad.scala:192:26]
wire _writer_io_busy; // @[Scratchpad.scala:192:26]
wire _reader_auto_core_out_a_valid; // @[Scratchpad.scala:189:26]
wire [2:0] _reader_auto_core_out_a_bits_opcode; // @[Scratchpad.scala:189:26]
wire [2:0] _reader_auto_core_out_a_bits_param; // @[Scratchpad.scala:189:26]
wire [3:0] _reader_auto_core_out_a_bits_size; // @[Scratchpad.scala:189:26]
wire [3:0] _reader_auto_core_out_a_bits_source; // @[Scratchpad.scala:189:26]
wire [31:0] _reader_auto_core_out_a_bits_address; // @[Scratchpad.scala:189:26]
wire [15:0] _reader_auto_core_out_a_bits_mask; // @[Scratchpad.scala:189:26]
wire [127:0] _reader_auto_core_out_a_bits_data; // @[Scratchpad.scala:189:26]
wire _reader_auto_core_out_a_bits_corrupt; // @[Scratchpad.scala:189:26]
wire _reader_auto_core_out_d_ready; // @[Scratchpad.scala:189:26]
wire _reader_io_req_ready; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_valid; // @[Scratchpad.scala:189:26]
wire [511:0] _reader_io_resp_bits_data; // @[Scratchpad.scala:189:26]
wire [13:0] _reader_io_resp_bits_addr; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_0; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_1; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_2; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_3; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_4; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_5; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_6; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_7; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_8; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_9; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_10; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_11; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_12; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_13; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_14; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_15; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_16; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_17; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_18; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_19; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_20; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_21; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_22; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_23; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_24; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_25; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_26; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_27; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_28; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_29; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_30; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_31; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_32; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_33; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_34; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_35; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_36; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_37; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_38; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_39; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_40; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_41; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_42; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_43; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_44; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_45; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_46; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_47; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_48; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_49; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_50; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_51; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_52; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_53; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_54; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_55; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_56; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_57; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_58; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_59; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_60; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_61; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_62; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_mask_63; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_is_acc; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_accumulate; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_has_acc_bitwidth; // @[Scratchpad.scala:189:26]
wire [31:0] _reader_io_resp_bits_scale; // @[Scratchpad.scala:189:26]
wire [15:0] _reader_io_resp_bits_repeats; // @[Scratchpad.scala:189:26]
wire [15:0] _reader_io_resp_bits_pixel_repeats; // @[Scratchpad.scala:189:26]
wire [15:0] _reader_io_resp_bits_len; // @[Scratchpad.scala:189:26]
wire _reader_io_resp_bits_last; // @[Scratchpad.scala:189:26]
wire [7:0] _reader_io_resp_bits_bytes_read; // @[Scratchpad.scala:189:26]
wire [7:0] _reader_io_resp_bits_cmd_id; // @[Scratchpad.scala:189:26]
wire _reader_io_busy; // @[Scratchpad.scala:189:26]
wire _xbar_auto_anon_in_1_a_ready; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_in_1_d_valid; // @[Xbar.scala:346:26]
wire [2:0] _xbar_auto_anon_in_1_d_bits_opcode; // @[Xbar.scala:346:26]
wire [1:0] _xbar_auto_anon_in_1_d_bits_param; // @[Xbar.scala:346:26]
wire [3:0] _xbar_auto_anon_in_1_d_bits_size; // @[Xbar.scala:346:26]
wire [3:0] _xbar_auto_anon_in_1_d_bits_source; // @[Xbar.scala:346:26]
wire [3:0] _xbar_auto_anon_in_1_d_bits_sink; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_in_1_d_bits_denied; // @[Xbar.scala:346:26]
wire [127:0] _xbar_auto_anon_in_1_d_bits_data; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_in_1_d_bits_corrupt; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_in_0_a_ready; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_in_0_d_valid; // @[Xbar.scala:346:26]
wire [2:0] _xbar_auto_anon_in_0_d_bits_opcode; // @[Xbar.scala:346:26]
wire [1:0] _xbar_auto_anon_in_0_d_bits_param; // @[Xbar.scala:346:26]
wire [3:0] _xbar_auto_anon_in_0_d_bits_size; // @[Xbar.scala:346:26]
wire [3:0] _xbar_auto_anon_in_0_d_bits_source; // @[Xbar.scala:346:26]
wire [3:0] _xbar_auto_anon_in_0_d_bits_sink; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_in_0_d_bits_denied; // @[Xbar.scala:346:26]
wire [127:0] _xbar_auto_anon_in_0_d_bits_data; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_in_0_d_bits_corrupt; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_out_a_valid; // @[Xbar.scala:346:26]
wire [2:0] _xbar_auto_anon_out_a_bits_opcode; // @[Xbar.scala:346:26]
wire [2:0] _xbar_auto_anon_out_a_bits_param; // @[Xbar.scala:346:26]
wire [3:0] _xbar_auto_anon_out_a_bits_size; // @[Xbar.scala:346:26]
wire [4:0] _xbar_auto_anon_out_a_bits_source; // @[Xbar.scala:346:26]
wire [31:0] _xbar_auto_anon_out_a_bits_address; // @[Xbar.scala:346:26]
wire [15:0] _xbar_auto_anon_out_a_bits_mask; // @[Xbar.scala:346:26]
wire [127:0] _xbar_auto_anon_out_a_bits_data; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_out_a_bits_corrupt; // @[Xbar.scala:346:26]
wire _xbar_auto_anon_out_d_ready; // @[Xbar.scala:346:26]
wire auto_id_out_a_ready_0 = auto_id_out_a_ready; // @[Scratchpad.scala:205:9]
wire auto_id_out_d_valid_0 = auto_id_out_d_valid; // @[Scratchpad.scala:205:9]
wire [2:0] auto_id_out_d_bits_opcode_0 = auto_id_out_d_bits_opcode; // @[Scratchpad.scala:205:9]
wire [1:0] auto_id_out_d_bits_param_0 = auto_id_out_d_bits_param; // @[Scratchpad.scala:205:9]
wire [3:0] auto_id_out_d_bits_size_0 = auto_id_out_d_bits_size; // @[Scratchpad.scala:205:9]
wire [4:0] auto_id_out_d_bits_source_0 = auto_id_out_d_bits_source; // @[Scratchpad.scala:205:9]
wire [3:0] auto_id_out_d_bits_sink_0 = auto_id_out_d_bits_sink; // @[Scratchpad.scala:205:9]
wire auto_id_out_d_bits_denied_0 = auto_id_out_d_bits_denied; // @[Scratchpad.scala:205:9]
wire [127:0] auto_id_out_d_bits_data_0 = auto_id_out_d_bits_data; // @[Scratchpad.scala:205:9]
wire auto_id_out_d_bits_corrupt_0 = auto_id_out_d_bits_corrupt; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_valid_0 = io_dma_read_req_valid; // @[Scratchpad.scala:205:9]
wire [39:0] io_dma_read_req_bits_vaddr_0 = io_dma_read_req_bits_vaddr; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_laddr_is_acc_addr_0 = io_dma_read_req_bits_laddr_is_acc_addr; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_laddr_accumulate_0 = io_dma_read_req_bits_laddr_accumulate; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_laddr_read_full_acc_row_0 = io_dma_read_req_bits_laddr_read_full_acc_row; // @[Scratchpad.scala:205:9]
wire [2:0] io_dma_read_req_bits_laddr_norm_cmd_0 = io_dma_read_req_bits_laddr_norm_cmd; // @[Scratchpad.scala:205:9]
wire [10:0] io_dma_read_req_bits_laddr_garbage_0 = io_dma_read_req_bits_laddr_garbage; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_laddr_garbage_bit_0 = io_dma_read_req_bits_laddr_garbage_bit; // @[Scratchpad.scala:205:9]
wire [13:0] io_dma_read_req_bits_laddr_data_0 = io_dma_read_req_bits_laddr_data; // @[Scratchpad.scala:205:9]
wire [15:0] io_dma_read_req_bits_cols_0 = io_dma_read_req_bits_cols; // @[Scratchpad.scala:205:9]
wire [15:0] io_dma_read_req_bits_repeats_0 = io_dma_read_req_bits_repeats; // @[Scratchpad.scala:205:9]
wire [31:0] io_dma_read_req_bits_scale_0 = io_dma_read_req_bits_scale; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_has_acc_bitwidth_0 = io_dma_read_req_bits_has_acc_bitwidth; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_all_zeros_0 = io_dma_read_req_bits_all_zeros; // @[Scratchpad.scala:205:9]
wire [15:0] io_dma_read_req_bits_block_stride_0 = io_dma_read_req_bits_block_stride; // @[Scratchpad.scala:205:9]
wire [7:0] io_dma_read_req_bits_pixel_repeats_0 = io_dma_read_req_bits_pixel_repeats; // @[Scratchpad.scala:205:9]
wire [7:0] io_dma_read_req_bits_cmd_id_0 = io_dma_read_req_bits_cmd_id; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_debug_0 = io_dma_read_req_bits_status_debug; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_cease_0 = io_dma_read_req_bits_status_cease; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_wfi_0 = io_dma_read_req_bits_status_wfi; // @[Scratchpad.scala:205:9]
wire [31:0] io_dma_read_req_bits_status_isa_0 = io_dma_read_req_bits_status_isa; // @[Scratchpad.scala:205:9]
wire [1:0] io_dma_read_req_bits_status_dprv_0 = io_dma_read_req_bits_status_dprv; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_dv_0 = io_dma_read_req_bits_status_dv; // @[Scratchpad.scala:205:9]
wire [1:0] io_dma_read_req_bits_status_prv_0 = io_dma_read_req_bits_status_prv; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_v_0 = io_dma_read_req_bits_status_v; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_sd_0 = io_dma_read_req_bits_status_sd; // @[Scratchpad.scala:205:9]
wire [22:0] io_dma_read_req_bits_status_zero2_0 = io_dma_read_req_bits_status_zero2; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_mpv_0 = io_dma_read_req_bits_status_mpv; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_gva_0 = io_dma_read_req_bits_status_gva; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_mbe_0 = io_dma_read_req_bits_status_mbe; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_sbe_0 = io_dma_read_req_bits_status_sbe; // @[Scratchpad.scala:205:9]
wire [1:0] io_dma_read_req_bits_status_sxl_0 = io_dma_read_req_bits_status_sxl; // @[Scratchpad.scala:205:9]
wire [1:0] io_dma_read_req_bits_status_uxl_0 = io_dma_read_req_bits_status_uxl; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_sd_rv32_0 = io_dma_read_req_bits_status_sd_rv32; // @[Scratchpad.scala:205:9]
wire [7:0] io_dma_read_req_bits_status_zero1_0 = io_dma_read_req_bits_status_zero1; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_tsr_0 = io_dma_read_req_bits_status_tsr; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_tw_0 = io_dma_read_req_bits_status_tw; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_tvm_0 = io_dma_read_req_bits_status_tvm; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_mxr_0 = io_dma_read_req_bits_status_mxr; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_sum_0 = io_dma_read_req_bits_status_sum; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_mprv_0 = io_dma_read_req_bits_status_mprv; // @[Scratchpad.scala:205:9]
wire [1:0] io_dma_read_req_bits_status_xs_0 = io_dma_read_req_bits_status_xs; // @[Scratchpad.scala:205:9]
wire [1:0] io_dma_read_req_bits_status_fs_0 = io_dma_read_req_bits_status_fs; // @[Scratchpad.scala:205:9]
wire [1:0] io_dma_read_req_bits_status_mpp_0 = io_dma_read_req_bits_status_mpp; // @[Scratchpad.scala:205:9]
wire [1:0] io_dma_read_req_bits_status_vs_0 = io_dma_read_req_bits_status_vs; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_spp_0 = io_dma_read_req_bits_status_spp; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_mpie_0 = io_dma_read_req_bits_status_mpie; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_ube_0 = io_dma_read_req_bits_status_ube; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_spie_0 = io_dma_read_req_bits_status_spie; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_upie_0 = io_dma_read_req_bits_status_upie; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_mie_0 = io_dma_read_req_bits_status_mie; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_hie_0 = io_dma_read_req_bits_status_hie; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_sie_0 = io_dma_read_req_bits_status_sie; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_bits_status_uie_0 = io_dma_read_req_bits_status_uie; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_valid_0 = io_dma_write_req_valid; // @[Scratchpad.scala:205:9]
wire [39:0] io_dma_write_req_bits_vaddr_0 = io_dma_write_req_bits_vaddr; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_laddr_is_acc_addr_0 = io_dma_write_req_bits_laddr_is_acc_addr; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_laddr_accumulate_0 = io_dma_write_req_bits_laddr_accumulate; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_laddr_read_full_acc_row_0 = io_dma_write_req_bits_laddr_read_full_acc_row; // @[Scratchpad.scala:205:9]
wire [10:0] io_dma_write_req_bits_laddr_garbage_0 = io_dma_write_req_bits_laddr_garbage; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_laddr_garbage_bit_0 = io_dma_write_req_bits_laddr_garbage_bit; // @[Scratchpad.scala:205:9]
wire [13:0] io_dma_write_req_bits_laddr_data_0 = io_dma_write_req_bits_laddr_data; // @[Scratchpad.scala:205:9]
wire [2:0] io_dma_write_req_bits_acc_act_0 = io_dma_write_req_bits_acc_act; // @[Scratchpad.scala:205:9]
wire [31:0] io_dma_write_req_bits_acc_scale_0 = io_dma_write_req_bits_acc_scale; // @[Scratchpad.scala:205:9]
wire [15:0] io_dma_write_req_bits_len_0 = io_dma_write_req_bits_len; // @[Scratchpad.scala:205:9]
wire [7:0] io_dma_write_req_bits_block_0 = io_dma_write_req_bits_block; // @[Scratchpad.scala:205:9]
wire [7:0] io_dma_write_req_bits_cmd_id_0 = io_dma_write_req_bits_cmd_id; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_debug_0 = io_dma_write_req_bits_status_debug; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_cease_0 = io_dma_write_req_bits_status_cease; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_wfi_0 = io_dma_write_req_bits_status_wfi; // @[Scratchpad.scala:205:9]
wire [31:0] io_dma_write_req_bits_status_isa_0 = io_dma_write_req_bits_status_isa; // @[Scratchpad.scala:205:9]
wire [1:0] io_dma_write_req_bits_status_dprv_0 = io_dma_write_req_bits_status_dprv; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_dv_0 = io_dma_write_req_bits_status_dv; // @[Scratchpad.scala:205:9]
wire [1:0] io_dma_write_req_bits_status_prv_0 = io_dma_write_req_bits_status_prv; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_v_0 = io_dma_write_req_bits_status_v; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_sd_0 = io_dma_write_req_bits_status_sd; // @[Scratchpad.scala:205:9]
wire [22:0] io_dma_write_req_bits_status_zero2_0 = io_dma_write_req_bits_status_zero2; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_mpv_0 = io_dma_write_req_bits_status_mpv; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_gva_0 = io_dma_write_req_bits_status_gva; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_mbe_0 = io_dma_write_req_bits_status_mbe; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_sbe_0 = io_dma_write_req_bits_status_sbe; // @[Scratchpad.scala:205:9]
wire [1:0] io_dma_write_req_bits_status_sxl_0 = io_dma_write_req_bits_status_sxl; // @[Scratchpad.scala:205:9]
wire [1:0] io_dma_write_req_bits_status_uxl_0 = io_dma_write_req_bits_status_uxl; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_sd_rv32_0 = io_dma_write_req_bits_status_sd_rv32; // @[Scratchpad.scala:205:9]
wire [7:0] io_dma_write_req_bits_status_zero1_0 = io_dma_write_req_bits_status_zero1; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_tsr_0 = io_dma_write_req_bits_status_tsr; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_tw_0 = io_dma_write_req_bits_status_tw; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_tvm_0 = io_dma_write_req_bits_status_tvm; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_mxr_0 = io_dma_write_req_bits_status_mxr; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_sum_0 = io_dma_write_req_bits_status_sum; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_mprv_0 = io_dma_write_req_bits_status_mprv; // @[Scratchpad.scala:205:9]
wire [1:0] io_dma_write_req_bits_status_xs_0 = io_dma_write_req_bits_status_xs; // @[Scratchpad.scala:205:9]
wire [1:0] io_dma_write_req_bits_status_fs_0 = io_dma_write_req_bits_status_fs; // @[Scratchpad.scala:205:9]
wire [1:0] io_dma_write_req_bits_status_mpp_0 = io_dma_write_req_bits_status_mpp; // @[Scratchpad.scala:205:9]
wire [1:0] io_dma_write_req_bits_status_vs_0 = io_dma_write_req_bits_status_vs; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_spp_0 = io_dma_write_req_bits_status_spp; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_mpie_0 = io_dma_write_req_bits_status_mpie; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_ube_0 = io_dma_write_req_bits_status_ube; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_spie_0 = io_dma_write_req_bits_status_spie; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_upie_0 = io_dma_write_req_bits_status_upie; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_mie_0 = io_dma_write_req_bits_status_mie; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_hie_0 = io_dma_write_req_bits_status_hie; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_sie_0 = io_dma_write_req_bits_status_sie; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_status_uie_0 = io_dma_write_req_bits_status_uie; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_pool_en_0 = io_dma_write_req_bits_pool_en; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_bits_store_en_0 = io_dma_write_req_bits_store_en; // @[Scratchpad.scala:205:9]
wire io_srams_read_0_req_valid_0 = io_srams_read_0_req_valid; // @[Scratchpad.scala:205:9]
wire [11:0] io_srams_read_0_req_bits_addr_0 = io_srams_read_0_req_bits_addr; // @[Scratchpad.scala:205:9]
wire io_srams_read_0_resp_ready_0 = io_srams_read_0_resp_ready; // @[Scratchpad.scala:205:9]
wire io_srams_read_1_req_valid_0 = io_srams_read_1_req_valid; // @[Scratchpad.scala:205:9]
wire [11:0] io_srams_read_1_req_bits_addr_0 = io_srams_read_1_req_bits_addr; // @[Scratchpad.scala:205:9]
wire io_srams_read_1_resp_ready_0 = io_srams_read_1_resp_ready; // @[Scratchpad.scala:205:9]
wire io_srams_read_2_req_valid_0 = io_srams_read_2_req_valid; // @[Scratchpad.scala:205:9]
wire [11:0] io_srams_read_2_req_bits_addr_0 = io_srams_read_2_req_bits_addr; // @[Scratchpad.scala:205:9]
wire io_srams_read_2_resp_ready_0 = io_srams_read_2_resp_ready; // @[Scratchpad.scala:205:9]
wire io_srams_read_3_req_valid_0 = io_srams_read_3_req_valid; // @[Scratchpad.scala:205:9]
wire [11:0] io_srams_read_3_req_bits_addr_0 = io_srams_read_3_req_bits_addr; // @[Scratchpad.scala:205:9]
wire io_srams_read_3_resp_ready_0 = io_srams_read_3_resp_ready; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_en_0 = io_srams_write_0_en; // @[Scratchpad.scala:205:9]
wire [11:0] io_srams_write_0_addr_0 = io_srams_write_0_addr; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_mask_0_0 = io_srams_write_0_mask_0; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_mask_1_0 = io_srams_write_0_mask_1; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_mask_2_0 = io_srams_write_0_mask_2; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_mask_3_0 = io_srams_write_0_mask_3; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_mask_4_0 = io_srams_write_0_mask_4; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_mask_5_0 = io_srams_write_0_mask_5; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_mask_6_0 = io_srams_write_0_mask_6; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_mask_7_0 = io_srams_write_0_mask_7; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_mask_8_0 = io_srams_write_0_mask_8; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_mask_9_0 = io_srams_write_0_mask_9; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_mask_10_0 = io_srams_write_0_mask_10; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_mask_11_0 = io_srams_write_0_mask_11; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_mask_12_0 = io_srams_write_0_mask_12; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_mask_13_0 = io_srams_write_0_mask_13; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_mask_14_0 = io_srams_write_0_mask_14; // @[Scratchpad.scala:205:9]
wire io_srams_write_0_mask_15_0 = io_srams_write_0_mask_15; // @[Scratchpad.scala:205:9]
wire [127:0] io_srams_write_0_data_0 = io_srams_write_0_data; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_en_0 = io_srams_write_1_en; // @[Scratchpad.scala:205:9]
wire [11:0] io_srams_write_1_addr_0 = io_srams_write_1_addr; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_mask_0_0 = io_srams_write_1_mask_0; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_mask_1_0 = io_srams_write_1_mask_1; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_mask_2_0 = io_srams_write_1_mask_2; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_mask_3_0 = io_srams_write_1_mask_3; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_mask_4_0 = io_srams_write_1_mask_4; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_mask_5_0 = io_srams_write_1_mask_5; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_mask_6_0 = io_srams_write_1_mask_6; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_mask_7_0 = io_srams_write_1_mask_7; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_mask_8_0 = io_srams_write_1_mask_8; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_mask_9_0 = io_srams_write_1_mask_9; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_mask_10_0 = io_srams_write_1_mask_10; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_mask_11_0 = io_srams_write_1_mask_11; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_mask_12_0 = io_srams_write_1_mask_12; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_mask_13_0 = io_srams_write_1_mask_13; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_mask_14_0 = io_srams_write_1_mask_14; // @[Scratchpad.scala:205:9]
wire io_srams_write_1_mask_15_0 = io_srams_write_1_mask_15; // @[Scratchpad.scala:205:9]
wire [127:0] io_srams_write_1_data_0 = io_srams_write_1_data; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_en_0 = io_srams_write_2_en; // @[Scratchpad.scala:205:9]
wire [11:0] io_srams_write_2_addr_0 = io_srams_write_2_addr; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_mask_0_0 = io_srams_write_2_mask_0; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_mask_1_0 = io_srams_write_2_mask_1; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_mask_2_0 = io_srams_write_2_mask_2; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_mask_3_0 = io_srams_write_2_mask_3; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_mask_4_0 = io_srams_write_2_mask_4; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_mask_5_0 = io_srams_write_2_mask_5; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_mask_6_0 = io_srams_write_2_mask_6; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_mask_7_0 = io_srams_write_2_mask_7; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_mask_8_0 = io_srams_write_2_mask_8; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_mask_9_0 = io_srams_write_2_mask_9; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_mask_10_0 = io_srams_write_2_mask_10; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_mask_11_0 = io_srams_write_2_mask_11; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_mask_12_0 = io_srams_write_2_mask_12; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_mask_13_0 = io_srams_write_2_mask_13; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_mask_14_0 = io_srams_write_2_mask_14; // @[Scratchpad.scala:205:9]
wire io_srams_write_2_mask_15_0 = io_srams_write_2_mask_15; // @[Scratchpad.scala:205:9]
wire [127:0] io_srams_write_2_data_0 = io_srams_write_2_data; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_en_0 = io_srams_write_3_en; // @[Scratchpad.scala:205:9]
wire [11:0] io_srams_write_3_addr_0 = io_srams_write_3_addr; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_mask_0_0 = io_srams_write_3_mask_0; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_mask_1_0 = io_srams_write_3_mask_1; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_mask_2_0 = io_srams_write_3_mask_2; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_mask_3_0 = io_srams_write_3_mask_3; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_mask_4_0 = io_srams_write_3_mask_4; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_mask_5_0 = io_srams_write_3_mask_5; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_mask_6_0 = io_srams_write_3_mask_6; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_mask_7_0 = io_srams_write_3_mask_7; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_mask_8_0 = io_srams_write_3_mask_8; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_mask_9_0 = io_srams_write_3_mask_9; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_mask_10_0 = io_srams_write_3_mask_10; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_mask_11_0 = io_srams_write_3_mask_11; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_mask_12_0 = io_srams_write_3_mask_12; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_mask_13_0 = io_srams_write_3_mask_13; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_mask_14_0 = io_srams_write_3_mask_14; // @[Scratchpad.scala:205:9]
wire io_srams_write_3_mask_15_0 = io_srams_write_3_mask_15; // @[Scratchpad.scala:205:9]
wire [127:0] io_srams_write_3_data_0 = io_srams_write_3_data; // @[Scratchpad.scala:205:9]
wire io_acc_read_req_0_valid_0 = io_acc_read_req_0_valid; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_req_0_bits_scale_bits_0 = io_acc_read_req_0_bits_scale_bits; // @[Scratchpad.scala:205:9]
wire [8:0] io_acc_read_req_0_bits_addr_0 = io_acc_read_req_0_bits_addr; // @[Scratchpad.scala:205:9]
wire [2:0] io_acc_read_req_0_bits_act_0 = io_acc_read_req_0_bits_act; // @[Scratchpad.scala:205:9]
wire io_acc_read_req_1_valid_0 = io_acc_read_req_1_valid; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_req_1_bits_scale_bits_0 = io_acc_read_req_1_bits_scale_bits; // @[Scratchpad.scala:205:9]
wire [8:0] io_acc_read_req_1_bits_addr_0 = io_acc_read_req_1_bits_addr; // @[Scratchpad.scala:205:9]
wire [2:0] io_acc_read_req_1_bits_act_0 = io_acc_read_req_1_bits_act; // @[Scratchpad.scala:205:9]
wire io_acc_read_resp_0_ready_0 = io_acc_read_resp_0_ready; // @[Scratchpad.scala:205:9]
wire io_acc_read_resp_1_ready_0 = io_acc_read_resp_1_ready; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_valid_0 = io_acc_write_0_valid; // @[Scratchpad.scala:205:9]
wire [8:0] io_acc_write_0_bits_addr_0 = io_acc_write_0_bits_addr; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_0_bits_data_0_0_0 = io_acc_write_0_bits_data_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_0_bits_data_1_0_0 = io_acc_write_0_bits_data_1_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_0_bits_data_2_0_0 = io_acc_write_0_bits_data_2_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_0_bits_data_3_0_0 = io_acc_write_0_bits_data_3_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_0_bits_data_4_0_0 = io_acc_write_0_bits_data_4_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_0_bits_data_5_0_0 = io_acc_write_0_bits_data_5_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_0_bits_data_6_0_0 = io_acc_write_0_bits_data_6_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_0_bits_data_7_0_0 = io_acc_write_0_bits_data_7_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_0_bits_data_8_0_0 = io_acc_write_0_bits_data_8_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_0_bits_data_9_0_0 = io_acc_write_0_bits_data_9_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_0_bits_data_10_0_0 = io_acc_write_0_bits_data_10_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_0_bits_data_11_0_0 = io_acc_write_0_bits_data_11_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_0_bits_data_12_0_0 = io_acc_write_0_bits_data_12_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_0_bits_data_13_0_0 = io_acc_write_0_bits_data_13_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_0_bits_data_14_0_0 = io_acc_write_0_bits_data_14_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_0_bits_data_15_0_0 = io_acc_write_0_bits_data_15_0; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_acc_0 = io_acc_write_0_bits_acc; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_0_0 = io_acc_write_0_bits_mask_0; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_1_0 = io_acc_write_0_bits_mask_1; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_2_0 = io_acc_write_0_bits_mask_2; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_3_0 = io_acc_write_0_bits_mask_3; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_4_0 = io_acc_write_0_bits_mask_4; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_5_0 = io_acc_write_0_bits_mask_5; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_6_0 = io_acc_write_0_bits_mask_6; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_7_0 = io_acc_write_0_bits_mask_7; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_8_0 = io_acc_write_0_bits_mask_8; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_9_0 = io_acc_write_0_bits_mask_9; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_10_0 = io_acc_write_0_bits_mask_10; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_11_0 = io_acc_write_0_bits_mask_11; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_12_0 = io_acc_write_0_bits_mask_12; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_13_0 = io_acc_write_0_bits_mask_13; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_14_0 = io_acc_write_0_bits_mask_14; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_15_0 = io_acc_write_0_bits_mask_15; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_16_0 = io_acc_write_0_bits_mask_16; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_17_0 = io_acc_write_0_bits_mask_17; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_18_0 = io_acc_write_0_bits_mask_18; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_19_0 = io_acc_write_0_bits_mask_19; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_20_0 = io_acc_write_0_bits_mask_20; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_21_0 = io_acc_write_0_bits_mask_21; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_22_0 = io_acc_write_0_bits_mask_22; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_23_0 = io_acc_write_0_bits_mask_23; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_24_0 = io_acc_write_0_bits_mask_24; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_25_0 = io_acc_write_0_bits_mask_25; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_26_0 = io_acc_write_0_bits_mask_26; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_27_0 = io_acc_write_0_bits_mask_27; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_28_0 = io_acc_write_0_bits_mask_28; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_29_0 = io_acc_write_0_bits_mask_29; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_30_0 = io_acc_write_0_bits_mask_30; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_31_0 = io_acc_write_0_bits_mask_31; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_32_0 = io_acc_write_0_bits_mask_32; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_33_0 = io_acc_write_0_bits_mask_33; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_34_0 = io_acc_write_0_bits_mask_34; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_35_0 = io_acc_write_0_bits_mask_35; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_36_0 = io_acc_write_0_bits_mask_36; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_37_0 = io_acc_write_0_bits_mask_37; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_38_0 = io_acc_write_0_bits_mask_38; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_39_0 = io_acc_write_0_bits_mask_39; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_40_0 = io_acc_write_0_bits_mask_40; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_41_0 = io_acc_write_0_bits_mask_41; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_42_0 = io_acc_write_0_bits_mask_42; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_43_0 = io_acc_write_0_bits_mask_43; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_44_0 = io_acc_write_0_bits_mask_44; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_45_0 = io_acc_write_0_bits_mask_45; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_46_0 = io_acc_write_0_bits_mask_46; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_47_0 = io_acc_write_0_bits_mask_47; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_48_0 = io_acc_write_0_bits_mask_48; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_49_0 = io_acc_write_0_bits_mask_49; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_50_0 = io_acc_write_0_bits_mask_50; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_51_0 = io_acc_write_0_bits_mask_51; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_52_0 = io_acc_write_0_bits_mask_52; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_53_0 = io_acc_write_0_bits_mask_53; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_54_0 = io_acc_write_0_bits_mask_54; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_55_0 = io_acc_write_0_bits_mask_55; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_56_0 = io_acc_write_0_bits_mask_56; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_57_0 = io_acc_write_0_bits_mask_57; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_58_0 = io_acc_write_0_bits_mask_58; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_59_0 = io_acc_write_0_bits_mask_59; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_60_0 = io_acc_write_0_bits_mask_60; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_61_0 = io_acc_write_0_bits_mask_61; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_62_0 = io_acc_write_0_bits_mask_62; // @[Scratchpad.scala:205:9]
wire io_acc_write_0_bits_mask_63_0 = io_acc_write_0_bits_mask_63; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_valid_0 = io_acc_write_1_valid; // @[Scratchpad.scala:205:9]
wire [8:0] io_acc_write_1_bits_addr_0 = io_acc_write_1_bits_addr; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_1_bits_data_0_0_0 = io_acc_write_1_bits_data_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_1_bits_data_1_0_0 = io_acc_write_1_bits_data_1_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_1_bits_data_2_0_0 = io_acc_write_1_bits_data_2_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_1_bits_data_3_0_0 = io_acc_write_1_bits_data_3_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_1_bits_data_4_0_0 = io_acc_write_1_bits_data_4_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_1_bits_data_5_0_0 = io_acc_write_1_bits_data_5_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_1_bits_data_6_0_0 = io_acc_write_1_bits_data_6_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_1_bits_data_7_0_0 = io_acc_write_1_bits_data_7_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_1_bits_data_8_0_0 = io_acc_write_1_bits_data_8_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_1_bits_data_9_0_0 = io_acc_write_1_bits_data_9_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_1_bits_data_10_0_0 = io_acc_write_1_bits_data_10_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_1_bits_data_11_0_0 = io_acc_write_1_bits_data_11_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_1_bits_data_12_0_0 = io_acc_write_1_bits_data_12_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_1_bits_data_13_0_0 = io_acc_write_1_bits_data_13_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_1_bits_data_14_0_0 = io_acc_write_1_bits_data_14_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_write_1_bits_data_15_0_0 = io_acc_write_1_bits_data_15_0; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_acc_0 = io_acc_write_1_bits_acc; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_0_0 = io_acc_write_1_bits_mask_0; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_1_0 = io_acc_write_1_bits_mask_1; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_2_0 = io_acc_write_1_bits_mask_2; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_3_0 = io_acc_write_1_bits_mask_3; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_4_0 = io_acc_write_1_bits_mask_4; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_5_0 = io_acc_write_1_bits_mask_5; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_6_0 = io_acc_write_1_bits_mask_6; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_7_0 = io_acc_write_1_bits_mask_7; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_8_0 = io_acc_write_1_bits_mask_8; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_9_0 = io_acc_write_1_bits_mask_9; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_10_0 = io_acc_write_1_bits_mask_10; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_11_0 = io_acc_write_1_bits_mask_11; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_12_0 = io_acc_write_1_bits_mask_12; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_13_0 = io_acc_write_1_bits_mask_13; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_14_0 = io_acc_write_1_bits_mask_14; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_15_0 = io_acc_write_1_bits_mask_15; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_16_0 = io_acc_write_1_bits_mask_16; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_17_0 = io_acc_write_1_bits_mask_17; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_18_0 = io_acc_write_1_bits_mask_18; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_19_0 = io_acc_write_1_bits_mask_19; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_20_0 = io_acc_write_1_bits_mask_20; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_21_0 = io_acc_write_1_bits_mask_21; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_22_0 = io_acc_write_1_bits_mask_22; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_23_0 = io_acc_write_1_bits_mask_23; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_24_0 = io_acc_write_1_bits_mask_24; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_25_0 = io_acc_write_1_bits_mask_25; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_26_0 = io_acc_write_1_bits_mask_26; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_27_0 = io_acc_write_1_bits_mask_27; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_28_0 = io_acc_write_1_bits_mask_28; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_29_0 = io_acc_write_1_bits_mask_29; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_30_0 = io_acc_write_1_bits_mask_30; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_31_0 = io_acc_write_1_bits_mask_31; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_32_0 = io_acc_write_1_bits_mask_32; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_33_0 = io_acc_write_1_bits_mask_33; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_34_0 = io_acc_write_1_bits_mask_34; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_35_0 = io_acc_write_1_bits_mask_35; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_36_0 = io_acc_write_1_bits_mask_36; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_37_0 = io_acc_write_1_bits_mask_37; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_38_0 = io_acc_write_1_bits_mask_38; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_39_0 = io_acc_write_1_bits_mask_39; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_40_0 = io_acc_write_1_bits_mask_40; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_41_0 = io_acc_write_1_bits_mask_41; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_42_0 = io_acc_write_1_bits_mask_42; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_43_0 = io_acc_write_1_bits_mask_43; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_44_0 = io_acc_write_1_bits_mask_44; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_45_0 = io_acc_write_1_bits_mask_45; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_46_0 = io_acc_write_1_bits_mask_46; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_47_0 = io_acc_write_1_bits_mask_47; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_48_0 = io_acc_write_1_bits_mask_48; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_49_0 = io_acc_write_1_bits_mask_49; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_50_0 = io_acc_write_1_bits_mask_50; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_51_0 = io_acc_write_1_bits_mask_51; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_52_0 = io_acc_write_1_bits_mask_52; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_53_0 = io_acc_write_1_bits_mask_53; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_54_0 = io_acc_write_1_bits_mask_54; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_55_0 = io_acc_write_1_bits_mask_55; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_56_0 = io_acc_write_1_bits_mask_56; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_57_0 = io_acc_write_1_bits_mask_57; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_58_0 = io_acc_write_1_bits_mask_58; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_59_0 = io_acc_write_1_bits_mask_59; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_60_0 = io_acc_write_1_bits_mask_60; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_61_0 = io_acc_write_1_bits_mask_61; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_62_0 = io_acc_write_1_bits_mask_62; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_bits_mask_63_0 = io_acc_write_1_bits_mask_63; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_miss_0 = io_tlb_0_resp_miss; // @[Scratchpad.scala:205:9]
wire [31:0] io_tlb_0_resp_paddr_0 = io_tlb_0_resp_paddr; // @[Scratchpad.scala:205:9]
wire [39:0] io_tlb_0_resp_gpa_0 = io_tlb_0_resp_gpa; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_pf_ld_0 = io_tlb_0_resp_pf_ld; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_pf_st_0 = io_tlb_0_resp_pf_st; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_pf_inst_0 = io_tlb_0_resp_pf_inst; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_ae_ld_0 = io_tlb_0_resp_ae_ld; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_ae_st_0 = io_tlb_0_resp_ae_st; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_ae_inst_0 = io_tlb_0_resp_ae_inst; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_cacheable_0 = io_tlb_0_resp_cacheable; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_must_alloc_0 = io_tlb_0_resp_must_alloc; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_prefetchable_0 = io_tlb_0_resp_prefetchable; // @[Scratchpad.scala:205:9]
wire [4:0] io_tlb_0_resp_cmd_0 = io_tlb_0_resp_cmd; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_miss_0 = io_tlb_1_resp_miss; // @[Scratchpad.scala:205:9]
wire [31:0] io_tlb_1_resp_paddr_0 = io_tlb_1_resp_paddr; // @[Scratchpad.scala:205:9]
wire [39:0] io_tlb_1_resp_gpa_0 = io_tlb_1_resp_gpa; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_pf_ld_0 = io_tlb_1_resp_pf_ld; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_pf_st_0 = io_tlb_1_resp_pf_st; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_pf_inst_0 = io_tlb_1_resp_pf_inst; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_ae_ld_0 = io_tlb_1_resp_ae_ld; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_ae_st_0 = io_tlb_1_resp_ae_st; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_ae_inst_0 = io_tlb_1_resp_ae_inst; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_cacheable_0 = io_tlb_1_resp_cacheable; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_must_alloc_0 = io_tlb_1_resp_must_alloc; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_prefetchable_0 = io_tlb_1_resp_prefetchable; // @[Scratchpad.scala:205:9]
wire [4:0] io_tlb_1_resp_cmd_0 = io_tlb_1_resp_cmd; // @[Scratchpad.scala:205:9]
wire io_flush_0 = io_flush; // @[Scratchpad.scala:205:9]
wire io_counter_external_reset_0 = io_counter_external_reset; // @[Scratchpad.scala:205:9]
wire [31:0] io_dma_write_req_bits_acc_igelu_qb = 32'h0; // @[Scratchpad.scala:205:9]
wire [31:0] io_dma_write_req_bits_acc_igelu_qc = 32'h0; // @[Scratchpad.scala:205:9]
wire [31:0] io_dma_write_req_bits_acc_iexp_qln2 = 32'h0; // @[Scratchpad.scala:205:9]
wire [31:0] io_dma_write_req_bits_acc_iexp_qln2_inv = 32'h0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_req_0_bits_igelu_qb = 32'h0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_req_0_bits_igelu_qc = 32'h0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_req_0_bits_iexp_qln2 = 32'h0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_req_0_bits_iexp_qln2_inv = 32'h0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_req_1_bits_igelu_qb = 32'h0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_req_1_bits_igelu_qc = 32'h0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_req_1_bits_iexp_qln2 = 32'h0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_req_1_bits_iexp_qln2_inv = 32'h0; // @[Scratchpad.scala:205:9]
wire [31:0] io_counter_external_values_0 = 32'h0; // @[Scratchpad.scala:205:9]
wire [31:0] io_counter_external_values_1 = 32'h0; // @[Scratchpad.scala:205:9]
wire [31:0] io_counter_external_values_2 = 32'h0; // @[Scratchpad.scala:205:9]
wire [31:0] io_counter_external_values_3 = 32'h0; // @[Scratchpad.scala:205:9]
wire [31:0] acc_norm_unit_out_bits_mean = 32'h0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_max = 32'h0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_inv_stddev_bits = 32'h0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_inv_sum_exp_bits = 32'h0; // @[Normalizer.scala:809:38]
wire [7:0] io_dma_write_req_bits_acc_norm_stats_id = 8'h0; // @[Scratchpad.scala:205:9]
wire io_srams_read_0_req_bits_fromDMA = 1'h0; // @[Scratchpad.scala:205:9]
wire io_srams_read_1_req_bits_fromDMA = 1'h0; // @[Scratchpad.scala:205:9]
wire io_srams_read_2_req_bits_fromDMA = 1'h0; // @[Scratchpad.scala:205:9]
wire io_srams_read_3_req_bits_fromDMA = 1'h0; // @[Scratchpad.scala:205:9]
wire io_acc_read_req_0_bits_full = 1'h0; // @[Scratchpad.scala:205:9]
wire io_acc_read_req_0_bits_fromDMA = 1'h0; // @[Scratchpad.scala:205:9]
wire io_acc_read_req_1_bits_full = 1'h0; // @[Scratchpad.scala:205:9]
wire io_acc_read_req_1_bits_fromDMA = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_tlb_req_passthrough = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_tlb_req_v = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_gpa_is_pte = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_gf_ld = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_gf_st = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_gf_inst = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_ma_ld = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_ma_st = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_resp_ma_inst = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_tlb_req_passthrough = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_tlb_req_v = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_gpa_is_pte = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_gf_ld = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_gf_st = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_gf_inst = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_ma_ld = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_ma_st = 1'h0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_resp_ma_inst = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_0 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_1 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_2 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_3 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_4 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_5 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_6 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_7 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_8 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_9 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_10 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_11 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_12 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_13 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_14 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_15 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_16 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_17 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_24 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_25 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_26 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_27 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_28 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_29 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_30 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_31 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_32 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_33 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_34 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_35 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_36 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_37 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_38 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_39 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_40 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_41 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_42 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_43 = 1'h0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_44 = 1'h0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_0_req_bits_tlb_req_size = 2'h0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_0_req_bits_tlb_req_prv = 2'h0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_0_resp_size = 2'h0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_1_req_bits_tlb_req_size = 2'h0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_1_req_bits_tlb_req_prv = 2'h0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_1_resp_size = 2'h0; // @[Scratchpad.scala:205:9]
wire [1:0] bank_ios_1_0_read_resp_bits_acc_bank_id = 2'h0; // @[Scratchpad.scala:637:29]
wire [1:0] bank_ios_1_1_read_resp_bits_acc_bank_id = 2'h0; // @[Scratchpad.scala:637:29]
wire io_acc_write_0_ready = 1'h1; // @[Scratchpad.scala:205:9]
wire io_acc_write_1_ready = 1'h1; // @[Scratchpad.scala:205:9]
wire [2:0] io_dma_write_req_bits_laddr_norm_cmd = 3'h0; // @[Scratchpad.scala:205:9]
wire [4:0] io_tlb_0_req_bits_tlb_req_cmd = 5'h1; // @[Scratchpad.scala:205:9]
wire [4:0] io_tlb_1_req_bits_tlb_req_cmd = 5'h0; // @[Scratchpad.scala:205:9]
wire id_nodeOut_a_ready = auto_id_out_a_ready_0; // @[Scratchpad.scala:205:9]
wire id_nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] id_nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] id_nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] id_nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [4:0] id_nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] id_nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [15:0] id_nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [127:0] id_nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire id_nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire id_nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire id_nodeOut_d_valid = auto_id_out_d_valid_0; // @[Scratchpad.scala:205:9]
wire [2:0] id_nodeOut_d_bits_opcode = auto_id_out_d_bits_opcode_0; // @[Scratchpad.scala:205:9]
wire [1:0] id_nodeOut_d_bits_param = auto_id_out_d_bits_param_0; // @[Scratchpad.scala:205:9]
wire [3:0] id_nodeOut_d_bits_size = auto_id_out_d_bits_size_0; // @[Scratchpad.scala:205:9]
wire [4:0] id_nodeOut_d_bits_source = auto_id_out_d_bits_source_0; // @[Scratchpad.scala:205:9]
wire [3:0] id_nodeOut_d_bits_sink = auto_id_out_d_bits_sink_0; // @[Scratchpad.scala:205:9]
wire id_nodeOut_d_bits_denied = auto_id_out_d_bits_denied_0; // @[Scratchpad.scala:205:9]
wire [127:0] id_nodeOut_d_bits_data = auto_id_out_d_bits_data_0; // @[Scratchpad.scala:205:9]
wire id_nodeOut_d_bits_corrupt = auto_id_out_d_bits_corrupt_0; // @[Scratchpad.scala:205:9]
wire _io_dma_read_resp_valid_T_1; // @[Scratchpad.scala:425:78]
wire [7:0] _io_dma_read_resp_bits_cmd_id_T_1; // @[Mux.scala:126:16]
wire bank_ios_0_read_req_ready; // @[Scratchpad.scala:452:29]
wire bank_ios_1_read_req_ready; // @[Scratchpad.scala:452:29]
wire bank_ios_2_read_req_ready; // @[Scratchpad.scala:452:29]
wire bank_ios_3_read_req_ready; // @[Scratchpad.scala:452:29]
wire bank_ios_1_0_read_req_ready; // @[Scratchpad.scala:637:29]
wire bank_ios_1_1_read_req_ready; // @[Scratchpad.scala:637:29]
wire _io_busy_T_4; // @[Scratchpad.scala:444:152]
wire [2:0] auto_id_out_a_bits_opcode_0; // @[Scratchpad.scala:205:9]
wire [2:0] auto_id_out_a_bits_param_0; // @[Scratchpad.scala:205:9]
wire [3:0] auto_id_out_a_bits_size_0; // @[Scratchpad.scala:205:9]
wire [4:0] auto_id_out_a_bits_source_0; // @[Scratchpad.scala:205:9]
wire [31:0] auto_id_out_a_bits_address_0; // @[Scratchpad.scala:205:9]
wire [15:0] auto_id_out_a_bits_mask_0; // @[Scratchpad.scala:205:9]
wire [127:0] auto_id_out_a_bits_data_0; // @[Scratchpad.scala:205:9]
wire auto_id_out_a_bits_corrupt_0; // @[Scratchpad.scala:205:9]
wire auto_id_out_a_valid_0; // @[Scratchpad.scala:205:9]
wire auto_id_out_d_ready_0; // @[Scratchpad.scala:205:9]
wire io_dma_read_req_ready_0; // @[Scratchpad.scala:205:9]
wire [15:0] io_dma_read_resp_bits_bytesRead_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_dma_read_resp_bits_cmd_id_0; // @[Scratchpad.scala:205:9]
wire io_dma_read_resp_valid_0; // @[Scratchpad.scala:205:9]
wire io_dma_write_req_ready_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_dma_write_resp_bits_cmd_id_0; // @[Scratchpad.scala:205:9]
wire io_dma_write_resp_valid_0; // @[Scratchpad.scala:205:9]
wire io_srams_read_0_req_ready_0; // @[Scratchpad.scala:205:9]
wire [127:0] io_srams_read_0_resp_bits_data_0; // @[Scratchpad.scala:205:9]
wire io_srams_read_0_resp_bits_fromDMA_0; // @[Scratchpad.scala:205:9]
wire io_srams_read_0_resp_valid_0; // @[Scratchpad.scala:205:9]
wire io_srams_read_1_req_ready_0; // @[Scratchpad.scala:205:9]
wire [127:0] io_srams_read_1_resp_bits_data_0; // @[Scratchpad.scala:205:9]
wire io_srams_read_1_resp_bits_fromDMA_0; // @[Scratchpad.scala:205:9]
wire io_srams_read_1_resp_valid_0; // @[Scratchpad.scala:205:9]
wire io_srams_read_2_req_ready_0; // @[Scratchpad.scala:205:9]
wire [127:0] io_srams_read_2_resp_bits_data_0; // @[Scratchpad.scala:205:9]
wire io_srams_read_2_resp_bits_fromDMA_0; // @[Scratchpad.scala:205:9]
wire io_srams_read_2_resp_valid_0; // @[Scratchpad.scala:205:9]
wire io_srams_read_3_req_ready_0; // @[Scratchpad.scala:205:9]
wire [127:0] io_srams_read_3_resp_bits_data_0; // @[Scratchpad.scala:205:9]
wire io_srams_read_3_resp_bits_fromDMA_0; // @[Scratchpad.scala:205:9]
wire io_srams_read_3_resp_valid_0; // @[Scratchpad.scala:205:9]
wire io_acc_read_req_0_ready_0; // @[Scratchpad.scala:205:9]
wire io_acc_read_req_1_ready_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_0_bits_full_data_0_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_0_bits_full_data_1_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_0_bits_full_data_2_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_0_bits_full_data_3_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_0_bits_full_data_4_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_0_bits_full_data_5_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_0_bits_full_data_6_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_0_bits_full_data_7_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_0_bits_full_data_8_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_0_bits_full_data_9_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_0_bits_full_data_10_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_0_bits_full_data_11_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_0_bits_full_data_12_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_0_bits_full_data_13_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_0_bits_full_data_14_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_0_bits_full_data_15_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_0_bits_data_0_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_0_bits_data_1_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_0_bits_data_2_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_0_bits_data_3_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_0_bits_data_4_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_0_bits_data_5_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_0_bits_data_6_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_0_bits_data_7_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_0_bits_data_8_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_0_bits_data_9_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_0_bits_data_10_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_0_bits_data_11_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_0_bits_data_12_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_0_bits_data_13_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_0_bits_data_14_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_0_bits_data_15_0_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_acc_read_resp_0_bits_acc_bank_id_0; // @[Scratchpad.scala:205:9]
wire io_acc_read_resp_0_bits_fromDMA_0; // @[Scratchpad.scala:205:9]
wire io_acc_read_resp_0_valid_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_1_bits_full_data_0_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_1_bits_full_data_1_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_1_bits_full_data_2_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_1_bits_full_data_3_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_1_bits_full_data_4_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_1_bits_full_data_5_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_1_bits_full_data_6_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_1_bits_full_data_7_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_1_bits_full_data_8_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_1_bits_full_data_9_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_1_bits_full_data_10_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_1_bits_full_data_11_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_1_bits_full_data_12_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_1_bits_full_data_13_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_1_bits_full_data_14_0_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_acc_read_resp_1_bits_full_data_15_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_1_bits_data_0_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_1_bits_data_1_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_1_bits_data_2_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_1_bits_data_3_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_1_bits_data_4_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_1_bits_data_5_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_1_bits_data_6_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_1_bits_data_7_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_1_bits_data_8_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_1_bits_data_9_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_1_bits_data_10_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_1_bits_data_11_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_1_bits_data_12_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_1_bits_data_13_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_1_bits_data_14_0_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_acc_read_resp_1_bits_data_15_0_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_acc_read_resp_1_bits_acc_bank_id_0; // @[Scratchpad.scala:205:9]
wire io_acc_read_resp_1_bits_fromDMA_0; // @[Scratchpad.scala:205:9]
wire io_acc_read_resp_1_valid_0; // @[Scratchpad.scala:205:9]
wire [39:0] io_tlb_0_req_bits_tlb_req_vaddr_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_debug_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_cease_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_wfi_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_tlb_0_req_bits_status_isa_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_0_req_bits_status_dprv_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_dv_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_0_req_bits_status_prv_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_v_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_sd_0; // @[Scratchpad.scala:205:9]
wire [22:0] io_tlb_0_req_bits_status_zero2_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_mpv_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_gva_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_mbe_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_sbe_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_0_req_bits_status_sxl_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_0_req_bits_status_uxl_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_sd_rv32_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_tlb_0_req_bits_status_zero1_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_tsr_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_tw_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_tvm_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_mxr_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_sum_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_mprv_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_0_req_bits_status_xs_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_0_req_bits_status_fs_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_0_req_bits_status_mpp_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_0_req_bits_status_vs_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_spp_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_mpie_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_ube_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_spie_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_upie_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_mie_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_hie_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_sie_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_bits_status_uie_0; // @[Scratchpad.scala:205:9]
wire io_tlb_0_req_valid_0; // @[Scratchpad.scala:205:9]
wire [39:0] io_tlb_1_req_bits_tlb_req_vaddr_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_debug_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_cease_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_wfi_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_tlb_1_req_bits_status_isa_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_1_req_bits_status_dprv_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_dv_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_1_req_bits_status_prv_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_v_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_sd_0; // @[Scratchpad.scala:205:9]
wire [22:0] io_tlb_1_req_bits_status_zero2_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_mpv_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_gva_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_mbe_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_sbe_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_1_req_bits_status_sxl_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_1_req_bits_status_uxl_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_sd_rv32_0; // @[Scratchpad.scala:205:9]
wire [7:0] io_tlb_1_req_bits_status_zero1_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_tsr_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_tw_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_tvm_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_mxr_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_sum_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_mprv_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_1_req_bits_status_xs_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_1_req_bits_status_fs_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_1_req_bits_status_mpp_0; // @[Scratchpad.scala:205:9]
wire [1:0] io_tlb_1_req_bits_status_vs_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_spp_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_mpie_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_ube_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_spie_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_upie_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_mie_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_hie_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_sie_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_bits_status_uie_0; // @[Scratchpad.scala:205:9]
wire io_tlb_1_req_valid_0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_18_0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_19_0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_20_0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_21_0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_22_0; // @[Scratchpad.scala:205:9]
wire io_counter_event_signal_23_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_counter_external_values_4_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_counter_external_values_5_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_counter_external_values_6_0; // @[Scratchpad.scala:205:9]
wire [31:0] io_counter_external_values_7_0; // @[Scratchpad.scala:205:9]
wire io_busy_0; // @[Scratchpad.scala:205:9]
wire widget_anonIn_a_ready; // @[MixedNode.scala:551:17]
wire widget_anonIn_a_valid = widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_a_bits_opcode = widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_a_bits_param = widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonIn_a_bits_size = widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] widget_anonIn_a_bits_source = widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_anonIn_a_bits_address = widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [15:0] widget_anonIn_a_bits_mask = widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [127:0] widget_anonIn_a_bits_data = widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_corrupt = widget_auto_anon_in_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_anonIn_d_ready = widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire widget_anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] widget_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] widget_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] widget_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [4:0] widget_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [3:0] widget_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [127:0] widget_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire id_nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire widget_anonOut_a_ready = widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire widget_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] widget_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire id_nodeIn_a_valid = widget_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] id_nodeIn_a_bits_opcode = widget_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [2:0] id_nodeIn_a_bits_param = widget_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9]
wire [4:0] widget_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [3:0] id_nodeIn_a_bits_size = widget_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9]
wire [31:0] widget_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [4:0] id_nodeIn_a_bits_source = widget_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9]
wire [15:0] widget_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [31:0] id_nodeIn_a_bits_address = widget_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
wire [127:0] widget_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire [15:0] id_nodeIn_a_bits_mask = widget_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9]
wire widget_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire [127:0] id_nodeIn_a_bits_data = widget_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9]
wire widget_anonOut_d_ready; // @[MixedNode.scala:542:17]
wire id_nodeIn_a_bits_corrupt = widget_auto_anon_out_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire id_nodeIn_d_ready = widget_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9]
wire id_nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire widget_anonOut_d_valid = widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] id_nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [2:0] widget_anonOut_d_bits_opcode = widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] id_nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [1:0] widget_anonOut_d_bits_param = widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] id_nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [3:0] widget_anonOut_d_bits_size = widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] id_nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [4:0] widget_anonOut_d_bits_source = widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] id_nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire [3:0] widget_anonOut_d_bits_sink = widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire id_nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire widget_anonOut_d_bits_denied = widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [127:0] id_nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire [127:0] widget_anonOut_d_bits_data = widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire id_nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire widget_anonOut_d_bits_corrupt = widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] widget_auto_anon_in_d_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [127:0] widget_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9]
assign widget_anonIn_a_ready = widget_anonOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_out_a_valid = widget_anonOut_a_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_opcode = widget_anonOut_a_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_param = widget_anonOut_a_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_size = widget_anonOut_a_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_source = widget_anonOut_a_bits_source; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_address = widget_anonOut_a_bits_address; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_mask = widget_anonOut_a_bits_mask; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_data = widget_anonOut_a_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_corrupt = widget_anonOut_a_bits_corrupt; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_ready = widget_anonOut_d_ready; // @[WidthWidget.scala:27:9]
assign widget_anonIn_d_valid = widget_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_opcode = widget_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_param = widget_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_size = widget_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_source = widget_anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_sink = widget_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_denied = widget_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_data = widget_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_corrupt = widget_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_in_a_ready = widget_anonIn_a_ready; // @[WidthWidget.scala:27:9]
assign widget_anonOut_a_valid = widget_anonIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_opcode = widget_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_param = widget_anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_size = widget_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_source = widget_anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_address = widget_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_mask = widget_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_data = widget_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_corrupt = widget_anonIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_d_ready = widget_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_in_d_valid = widget_anonIn_d_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_opcode = widget_anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_param = widget_anonIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_size = widget_anonIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_source = widget_anonIn_d_bits_source; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_sink = widget_anonIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_denied = widget_anonIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_data = widget_anonIn_d_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_corrupt = widget_anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
assign id_nodeIn_a_ready = id_nodeOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign auto_id_out_a_valid_0 = id_nodeOut_a_valid; // @[Scratchpad.scala:205:9]
assign auto_id_out_a_bits_opcode_0 = id_nodeOut_a_bits_opcode; // @[Scratchpad.scala:205:9]
assign auto_id_out_a_bits_param_0 = id_nodeOut_a_bits_param; // @[Scratchpad.scala:205:9]
assign auto_id_out_a_bits_size_0 = id_nodeOut_a_bits_size; // @[Scratchpad.scala:205:9]
assign auto_id_out_a_bits_source_0 = id_nodeOut_a_bits_source; // @[Scratchpad.scala:205:9]
assign auto_id_out_a_bits_address_0 = id_nodeOut_a_bits_address; // @[Scratchpad.scala:205:9]
assign auto_id_out_a_bits_mask_0 = id_nodeOut_a_bits_mask; // @[Scratchpad.scala:205:9]
assign auto_id_out_a_bits_data_0 = id_nodeOut_a_bits_data; // @[Scratchpad.scala:205:9]
assign auto_id_out_a_bits_corrupt_0 = id_nodeOut_a_bits_corrupt; // @[Scratchpad.scala:205:9]
assign auto_id_out_d_ready_0 = id_nodeOut_d_ready; // @[Scratchpad.scala:205:9]
assign id_nodeIn_d_valid = id_nodeOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeIn_d_bits_opcode = id_nodeOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeIn_d_bits_param = id_nodeOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeIn_d_bits_size = id_nodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeIn_d_bits_source = id_nodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeIn_d_bits_sink = id_nodeOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeIn_d_bits_denied = id_nodeOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeIn_d_bits_data = id_nodeOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeIn_d_bits_corrupt = id_nodeOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_out_a_ready = id_nodeIn_a_ready; // @[WidthWidget.scala:27:9]
assign id_nodeOut_a_valid = id_nodeIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeOut_a_bits_opcode = id_nodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeOut_a_bits_param = id_nodeIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeOut_a_bits_size = id_nodeIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeOut_a_bits_source = id_nodeIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeOut_a_bits_address = id_nodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeOut_a_bits_mask = id_nodeIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeOut_a_bits_data = id_nodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeOut_a_bits_corrupt = id_nodeIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign id_nodeOut_d_ready = id_nodeIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_out_d_valid = id_nodeIn_d_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_bits_opcode = id_nodeIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_bits_param = id_nodeIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_bits_size = id_nodeIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_bits_source = id_nodeIn_d_bits_source; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_bits_sink = id_nodeIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_bits_denied = id_nodeIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_bits_data = id_nodeIn_d_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_bits_corrupt = id_nodeIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire _T_22 = _write_dispatch_q_q_io_deq_bits_laddr_is_acc_addr & _write_dispatch_q_q_io_deq_bits_laddr_accumulate; // @[Decoupled.scala:362:21]
wire _dmawrite_T_1; // @[LocalAddr.scala:43:48]
assign _dmawrite_T_1 = _T_22; // @[LocalAddr.scala:43:48]
wire _dmawrite_T_17; // @[LocalAddr.scala:43:48]
assign _dmawrite_T_17 = _T_22; // @[LocalAddr.scala:43:48]
wire _dmawrite_T_33; // @[LocalAddr.scala:43:48]
assign _dmawrite_T_33 = _T_22; // @[LocalAddr.scala:43:48]
wire _dmawrite_T_49; // @[LocalAddr.scala:43:48]
assign _dmawrite_T_49 = _T_22; // @[LocalAddr.scala:43:48]
wire _dmawrite_T_65; // @[LocalAddr.scala:43:48]
assign _dmawrite_T_65 = _T_22; // @[LocalAddr.scala:43:48]
wire _dmawrite_T_77; // @[LocalAddr.scala:43:48]
assign _dmawrite_T_77 = _T_22; // @[LocalAddr.scala:43:48]
wire _T_5 = _T_22 & _write_dispatch_q_q_io_deq_bits_laddr_read_full_acc_row & (&_write_dispatch_q_q_io_deq_bits_laddr_data) & _write_dispatch_q_q_io_deq_bits_laddr_garbage_bit; // @[Decoupled.scala:362:21]
wire _T_131 = _write_norm_q_io_deq_bits_laddr_is_acc_addr & _write_norm_q_io_deq_bits_laddr_accumulate; // @[Scratchpad.scala:252:30]
wire _T_13 = _T_131 & _write_norm_q_io_deq_bits_laddr_read_full_acc_row & (&_write_norm_q_io_deq_bits_laddr_data) & _write_norm_q_io_deq_bits_laddr_garbage_bit | ~_write_norm_q_io_deq_bits_laddr_is_acc_addr; // @[Scratchpad.scala:252:30, :276:{55,58}]
wire _acc_waiting_to_be_scaled_T = _write_scale_q_io_deq_bits_laddr_is_acc_addr & _write_scale_q_io_deq_bits_laddr_accumulate; // @[Scratchpad.scala:253:31]
wire writeData_valid; // @[Scratchpad.scala:285:25]
wire [511:0] writeData_bits; // @[Scratchpad.scala:285:25]
wire _GEN = _write_issue_q_io_deq_bits_laddr_is_acc_addr & _write_issue_q_io_deq_bits_laddr_accumulate; // @[Scratchpad.scala:254:31]
wire _writeData_valid_T; // @[LocalAddr.scala:43:48]
assign _writeData_valid_T = _GEN; // @[LocalAddr.scala:43:48]
wire _writeData_is_full_width_T; // @[LocalAddr.scala:43:48]
assign _writeData_is_full_width_T = _GEN; // @[LocalAddr.scala:43:48]
wire _writeData_is_all_zeros_T; // @[LocalAddr.scala:43:48]
assign _writeData_is_all_zeros_T = _GEN; // @[LocalAddr.scala:43:48]
wire _p_io_out_ready_T_5; // @[LocalAddr.scala:43:48]
assign _p_io_out_ready_T_5 = _GEN; // @[LocalAddr.scala:43:48]
wire _p_io_out_ready_T_18; // @[LocalAddr.scala:43:48]
assign _p_io_out_ready_T_18 = _GEN; // @[LocalAddr.scala:43:48]
wire _p_io_out_ready_T_31; // @[LocalAddr.scala:43:48]
assign _p_io_out_ready_T_31 = _GEN; // @[LocalAddr.scala:43:48]
wire _p_io_out_ready_T_44; // @[LocalAddr.scala:43:48]
assign _p_io_out_ready_T_44 = _GEN; // @[LocalAddr.scala:43:48]
wire _dma_resp_ready_T_1; // @[LocalAddr.scala:43:48]
assign _dma_resp_ready_T_1 = _GEN; // @[LocalAddr.scala:43:48]
wire _writeData_valid_T_1 = _writeData_valid_T & _write_issue_q_io_deq_bits_laddr_read_full_acc_row; // @[Scratchpad.scala:254:31]
wire _writeData_valid_T_2 = &_write_issue_q_io_deq_bits_laddr_data; // @[Scratchpad.scala:254:31]
wire _writeData_valid_T_3 = _writeData_valid_T_1 & _writeData_valid_T_2; // @[LocalAddr.scala:43:{62,83,91}]
wire _writeData_valid_T_4; // @[LocalAddr.scala:44:48]
wire _writeData_valid_T_5 = _writeData_valid_T_3 & _writeData_valid_T_4; // @[LocalAddr.scala:43:{83,96}, :44:48]
wire [511:0] _fullAccWriteData_T_16; // @[Scratchpad.scala:616:64]
wire [511:0] fullAccWriteData; // @[Scratchpad.scala:288:32]
wire _writeData_is_full_width_T_1 = _writeData_is_full_width_T & _write_issue_q_io_deq_bits_laddr_read_full_acc_row; // @[Scratchpad.scala:254:31]
wire _writeData_is_full_width_T_2 = &_write_issue_q_io_deq_bits_laddr_data; // @[Scratchpad.scala:254:31]
wire _writeData_is_full_width_T_3 = _writeData_is_full_width_T_1 & _writeData_is_full_width_T_2; // @[LocalAddr.scala:43:{62,83,91}]
wire _writeData_is_full_width_T_4; // @[LocalAddr.scala:44:48]
wire _writeData_is_full_width_T_5 = _writeData_is_full_width_T_3 & _writeData_is_full_width_T_4; // @[LocalAddr.scala:43:{83,96}, :44:48]
wire _writeData_is_full_width_T_6 = ~_writeData_is_full_width_T_5; // @[Scratchpad.scala:290:35]
wire _writeData_is_full_width_T_7 = _writeData_is_full_width_T_6 & _write_issue_q_io_deq_bits_laddr_is_acc_addr; // @[Scratchpad.scala:254:31, :290:{35,81}]
wire writeData_is_full_width = _writeData_is_full_width_T_7 & _write_issue_q_io_deq_bits_laddr_read_full_acc_row; // @[Scratchpad.scala:254:31, :290:81, :291:51]
wire _writeData_is_all_zeros_T_1 = _writeData_is_all_zeros_T & _write_issue_q_io_deq_bits_laddr_read_full_acc_row; // @[Scratchpad.scala:254:31]
wire _writeData_is_all_zeros_T_2 = &_write_issue_q_io_deq_bits_laddr_data; // @[Scratchpad.scala:254:31]
wire _writeData_is_all_zeros_T_3 = _writeData_is_all_zeros_T_1 & _writeData_is_all_zeros_T_2; // @[LocalAddr.scala:43:{62,83,91}]
wire _writeData_is_all_zeros_T_4; // @[LocalAddr.scala:44:48]
wire writeData_is_all_zeros = _writeData_is_all_zeros_T_3 & _writeData_is_all_zeros_T_4; // @[LocalAddr.scala:43:{83,96}, :44:48]
wire _writer_io_req_valid_T = _write_issue_q_io_deq_valid & writeData_valid; // @[Scratchpad.scala:254:31, :285:25, :294:62]
wire _write_issue_q_io_deq_ready_T = _writer_io_req_ready & writeData_valid; // @[Scratchpad.scala:192:26, :285:25, :295:62]
wire [18:0] _writer_io_req_bits_len_T = {1'h0, _write_issue_q_io_deq_bits_len, 2'h0}; // @[Scratchpad.scala:254:31, :298:37]
wire [16:0] _writer_io_req_bits_len_T_1 = {1'h0, _write_issue_q_io_deq_bits_len}; // @[Scratchpad.scala:254:31, :299:37]
wire [18:0] _writer_io_req_bits_len_T_2 = writeData_is_full_width ? _writer_io_req_bits_len_T : {2'h0, _writer_io_req_bits_len_T_1}; // @[Scratchpad.scala:291:51, :297:41, :298:37, :299:37]
wire [511:0] _writer_io_req_bits_data_T = writeData_is_full_width ? fullAccWriteData : writeData_bits; // @[Mux.scala:126:16]
wire [511:0] _writer_io_req_bits_data_T_1 = writeData_is_all_zeros ? 512'h0 : _writer_io_req_bits_data_T; // @[Mux.scala:126:16]
assign io_dma_read_req_ready_0 = io_dma_read_req_bits_all_zeros_0 ? _zero_writer_io_req_ready : _read_issue_q_io_enq_ready; // @[Scratchpad.scala:205:9, :255:30, :315:25, :317:29, :319:43, :321:29]
wire _zero_writer_io_req_valid_T = io_dma_read_req_valid_0 & io_dma_read_req_bits_all_zeros_0; // @[Scratchpad.scala:205:9, :324:55]
// 16 per-column "expanded" flags (one per scratchpad column, Scratchpad.scala:341).
// Only the declarations are in this chunk; their drivers are elsewhere in the module —
// presumably the zero-writer mask-expansion logic, given the source locator. TODO(review): confirm.
wire expanded_0; // @[Scratchpad.scala:341:29]
wire expanded_1; // @[Scratchpad.scala:341:29]
wire expanded_2; // @[Scratchpad.scala:341:29]
wire expanded_3; // @[Scratchpad.scala:341:29]
wire expanded_4; // @[Scratchpad.scala:341:29]
wire expanded_5; // @[Scratchpad.scala:341:29]
wire expanded_6; // @[Scratchpad.scala:341:29]
wire expanded_7; // @[Scratchpad.scala:341:29]
wire expanded_8; // @[Scratchpad.scala:341:29]
wire expanded_9; // @[Scratchpad.scala:341:29]
wire expanded_10; // @[Scratchpad.scala:341:29]
wire expanded_11; // @[Scratchpad.scala:341:29]
wire expanded_12; // @[Scratchpad.scala:341:29]
wire expanded_13; // @[Scratchpad.scala:341:29]
wire expanded_14; // @[Scratchpad.scala:341:29]
wire expanded_15; // @[Scratchpad.scala:341:29]
wire [9:0] _reader_io_req_bits_spaddr_T = _read_issue_q_io_deq_bits_laddr_data[9:0]; // @[Scratchpad.scala:255:30]
wire [13:0] _reader_io_req_bits_spaddr_T_1; // @[LocalAddr.scala:38:42]
wire [13:0] _reader_io_req_bits_spaddr_T_2 = _read_issue_q_io_deq_bits_laddr_is_acc_addr ? {4'h0, _reader_io_req_bits_spaddr_T} : _reader_io_req_bits_spaddr_T_1; // @[Scratchpad.scala:255:30, :351:44]
wire _vsm_in_q_io_enq_valid_T = ~_reader_io_resp_bits_is_acc; // @[Scratchpad.scala:189:26, :377:83]
wire _vsm_in_q_io_enq_valid_T_1 = _vsm_in_q_io_enq_valid_T; // @[Scratchpad.scala:377:{80,83}]
wire _vsm_in_q_io_enq_valid_T_2 = ~_reader_io_resp_bits_has_acc_bitwidth; // @[Scratchpad.scala:189:26, :378:45]
wire _vsm_in_q_io_enq_valid_T_3 = _reader_io_resp_bits_is_acc & _vsm_in_q_io_enq_valid_T_2; // @[Scratchpad.scala:189:26, :378:{42,45}]
wire _vsm_in_q_io_enq_valid_T_4 = _vsm_in_q_io_enq_valid_T_1 | _vsm_in_q_io_enq_valid_T_3; // @[Scratchpad.scala:377:{80,118}, :378:42]
wire _vsm_in_q_io_enq_valid_T_5 = _reader_io_resp_valid & _vsm_in_q_io_enq_valid_T_4; // @[Scratchpad.scala:189:26, :377:{56,118}]
wire [31:0] _vsm_in_q_io_enq_bits_scale_T; // @[Scratchpad.scala:381:74]
wire [31:0] _vsm_in_q_io_enq_bits_scale_WIRE_1; // @[Scratchpad.scala:381:74]
assign _vsm_in_q_io_enq_bits_scale_T = _vsm_in_q_io_enq_bits_scale_WIRE_1; // @[Scratchpad.scala:381:74]
wire [31:0] _vsm_in_q_io_enq_bits_scale_WIRE_bits = _vsm_in_q_io_enq_bits_scale_T; // @[Scratchpad.scala:381:74]
wire _mvin_scale_pixel_repeater_io_req_bits_laddr_T_6; // @[Scratchpad.scala:391:89]
wire _mvin_scale_pixel_repeater_io_req_bits_laddr_T_5; // @[Scratchpad.scala:391:89]
wire mvin_scale_pixel_repeater_io_req_bits_laddr_result_is_acc_addr = _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_is_acc_addr; // @[Scratchpad.scala:391:89]
wire _mvin_scale_pixel_repeater_io_req_bits_laddr_T_4; // @[Scratchpad.scala:391:89]
wire mvin_scale_pixel_repeater_io_req_bits_laddr_result_accumulate = _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_accumulate; // @[Scratchpad.scala:391:89]
wire [2:0] _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_3; // @[Scratchpad.scala:391:89]
wire mvin_scale_pixel_repeater_io_req_bits_laddr_result_read_full_acc_row = _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_read_full_acc_row; // @[Scratchpad.scala:391:89]
wire [10:0] _mvin_scale_pixel_repeater_io_req_bits_laddr_T_2; // @[Scratchpad.scala:391:89]
wire [2:0] mvin_scale_pixel_repeater_io_req_bits_laddr_result_norm_cmd = _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_norm_cmd; // @[Scratchpad.scala:391:89]
wire _mvin_scale_pixel_repeater_io_req_bits_laddr_T_1; // @[Scratchpad.scala:391:89]
wire [10:0] mvin_scale_pixel_repeater_io_req_bits_laddr_result_garbage = _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_garbage; // @[Scratchpad.scala:391:89]
wire [13:0] _mvin_scale_pixel_repeater_io_req_bits_laddr_T; // @[Scratchpad.scala:391:89]
wire mvin_scale_pixel_repeater_io_req_bits_laddr_result_garbage_bit = _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_garbage_bit; // @[Scratchpad.scala:391:89]
wire [31:0] _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_1 = {18'h0, _vsm_io_resp_bits_tag_addr}; // @[VectorScalarMultiplier.scala:200:21]
assign _mvin_scale_pixel_repeater_io_req_bits_laddr_T = _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_1[13:0]; // @[Scratchpad.scala:391:89]
wire [13:0] _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_data = _mvin_scale_pixel_repeater_io_req_bits_laddr_T; // @[Scratchpad.scala:391:89]
assign _mvin_scale_pixel_repeater_io_req_bits_laddr_T_1 = _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_1[14]; // @[Scratchpad.scala:391:89]
assign _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_garbage_bit = _mvin_scale_pixel_repeater_io_req_bits_laddr_T_1; // @[Scratchpad.scala:391:89]
assign _mvin_scale_pixel_repeater_io_req_bits_laddr_T_2 = _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_1[25:15]; // @[Scratchpad.scala:391:89]
assign _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_garbage = _mvin_scale_pixel_repeater_io_req_bits_laddr_T_2; // @[Scratchpad.scala:391:89]
wire [2:0] _mvin_scale_pixel_repeater_io_req_bits_laddr_T_3 = _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_1[28:26]; // @[Scratchpad.scala:391:89]
wire [2:0] _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_2 = _mvin_scale_pixel_repeater_io_req_bits_laddr_T_3; // @[Scratchpad.scala:391:89]
assign _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_3 = _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_2; // @[Scratchpad.scala:391:89]
assign _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_norm_cmd = _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_3; // @[Scratchpad.scala:391:89]
assign _mvin_scale_pixel_repeater_io_req_bits_laddr_T_4 = _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_1[29]; // @[Scratchpad.scala:391:89]
assign _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_read_full_acc_row = _mvin_scale_pixel_repeater_io_req_bits_laddr_T_4; // @[Scratchpad.scala:391:89]
assign _mvin_scale_pixel_repeater_io_req_bits_laddr_T_5 = _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_1[30]; // @[Scratchpad.scala:391:89]
assign _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_accumulate = _mvin_scale_pixel_repeater_io_req_bits_laddr_T_5; // @[Scratchpad.scala:391:89]
assign _mvin_scale_pixel_repeater_io_req_bits_laddr_T_6 = _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_1[31]; // @[Scratchpad.scala:391:89]
assign _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_is_acc_addr = _mvin_scale_pixel_repeater_io_req_bits_laddr_T_6; // @[Scratchpad.scala:391:89]
wire [13:0] mvin_scale_pixel_repeater_io_req_bits_laddr_result_data; // @[LocalAddr.scala:50:26]
wire [16:0] _mvin_scale_pixel_repeater_io_req_bits_laddr_result_data_T = {3'h0, _mvin_scale_pixel_repeater_io_req_bits_laddr_WIRE_data} + {1'h0, _vsm_io_resp_bits_row}; // @[VectorScalarMultiplier.scala:200:21]
wire [15:0] _mvin_scale_pixel_repeater_io_req_bits_laddr_result_data_T_1 = _mvin_scale_pixel_repeater_io_req_bits_laddr_result_data_T[15:0]; // @[LocalAddr.scala:51:25]
assign mvin_scale_pixel_repeater_io_req_bits_laddr_result_data = _mvin_scale_pixel_repeater_io_req_bits_laddr_result_data_T_1[13:0]; // @[LocalAddr.scala:50:26, :51:{17,25}]
wire _GEN_0 = _reader_io_resp_bits_is_acc & _reader_io_resp_bits_has_acc_bitwidth; // @[Scratchpad.scala:189:26, :402:44]
wire _vsm_in_q_io_enq_valid_T_6; // @[Scratchpad.scala:402:44]
assign _vsm_in_q_io_enq_valid_T_6 = _GEN_0; // @[Scratchpad.scala:402:44]
wire _reader_io_resp_ready_T; // @[Scratchpad.scala:413:74]
assign _reader_io_resp_ready_T = _GEN_0; // @[Scratchpad.scala:402:44, :413:74]
wire _vsm_in_q_io_enq_valid_T_7 = _reader_io_resp_valid & _vsm_in_q_io_enq_valid_T_6; // @[Scratchpad.scala:189:26, :401:62, :402:44]
wire _vsm_in_q_io_enq_bits_scale_WIRE_2 = _reader_io_resp_bits_scale[0]; // @[Scratchpad.scala:189:26, :404:80]
wire _reader_io_resp_ready_T_1 = _reader_io_resp_ready_T ? _vsm_in_q_1_io_enq_ready : _vsm_in_q_io_enq_ready; // @[VectorScalarMultiplier.scala:201:26]
// Completion detection for the three mvin ("move-in") datapaths. Each path is
// "finished" when its response channel fires — i.e. a ready/valid handshake
// (Decoupled.scala:51) — on a beat whose `last` bit is set.
wire _mvin_scale_finished_T = mvin_scale_pixel_repeater_io_resp_ready & _mvin_scale_pixel_repeater_io_resp_valid; // @[Decoupled.scala:51:35]
wire mvin_scale_finished = _mvin_scale_finished_T & _mvin_scale_pixel_repeater_io_resp_bits_last; // @[Decoupled.scala:51:35]
wire _mvin_scale_acc_finished_T = vsm_1_io_resp_ready & _vsm_1_io_resp_valid; // @[Decoupled.scala:51:35]
wire mvin_scale_acc_finished = _mvin_scale_acc_finished_T & _vsm_1_io_resp_bits_last; // @[Decoupled.scala:51:35]
wire _zero_writer_finished_T = zero_writer_pixel_repeater_io_resp_ready & _zero_writer_pixel_repeater_io_resp_valid; // @[Decoupled.scala:51:35]
wire zero_writer_finished = _zero_writer_finished_T & _zero_writer_pixel_repeater_io_resp_bits_last; // @[Decoupled.scala:51:35]
// Bytes read by the zero-writer path: the column count is shifted left by 2
// (x4, i.e. `{cols, 2'h0}`) when the local address targets the accumulator,
// otherwise the raw column count is used.
wire [18:0] _zero_writer_bytes_read_T = {1'h0, _zero_writer_pixel_repeater_io_resp_bits_tag_cols, 2'h0}; // @[Scratchpad.scala:330:44, :421:56]
wire [16:0] _zero_writer_bytes_read_T_1 = {1'h0, _zero_writer_pixel_repeater_io_resp_bits_tag_cols}; // @[Scratchpad.scala:330:44, :422:56]
wire [18:0] zero_writer_bytes_read = _zero_writer_pixel_repeater_io_resp_bits_laddr_is_acc_addr ? _zero_writer_bytes_read_T : {2'h0, _zero_writer_bytes_read_T_1}; // @[Scratchpad.scala:330:44, :420:37, :421:56, :422:56]
// DMA read response: valid when ANY of the three mvin paths finishes this cycle.
wire _io_dma_read_resp_valid_T = mvin_scale_finished | mvin_scale_acc_finished; // @[Scratchpad.scala:416:70, :417:59, :425:51]
assign _io_dma_read_resp_valid_T_1 = _io_dma_read_resp_valid_T | zero_writer_finished; // @[Scratchpad.scala:418:72, :425:{51,78}]
assign io_dma_read_resp_valid_0 = _io_dma_read_resp_valid_T_1; // @[Scratchpad.scala:205:9, :425:78]
// cmd_id / bytesRead are priority-muxed across the three paths:
// mvin_scale wins over mvin_scale_acc, which wins over zero_writer.
wire [7:0] _io_dma_read_resp_bits_cmd_id_T = mvin_scale_acc_finished ? _vsm_1_io_resp_bits_tag_cmd_id : _zero_writer_pixel_repeater_io_resp_bits_tag_cmd_id; // @[Mux.scala:126:16]
assign _io_dma_read_resp_bits_cmd_id_T_1 = mvin_scale_finished ? _mvin_scale_pixel_repeater_io_resp_bits_tag_cmd_id : _io_dma_read_resp_bits_cmd_id_T; // @[Mux.scala:126:16]
assign io_dma_read_resp_bits_cmd_id_0 = _io_dma_read_resp_bits_cmd_id_T_1; // @[Mux.scala:126:16]
wire [18:0] _io_dma_read_resp_bits_bytesRead_T = mvin_scale_acc_finished ? {11'h0, _vsm_1_io_resp_bits_tag_bytes_read} : zero_writer_bytes_read; // @[Mux.scala:126:16]
wire [18:0] _io_dma_read_resp_bits_bytesRead_T_1 = mvin_scale_finished ? {11'h0, _mvin_scale_pixel_repeater_io_resp_bits_tag_bytes_read} : _io_dma_read_resp_bits_bytesRead_T; // @[Mux.scala:126:16]
// Output port is 16 bits wide; the 19-bit mux result is truncated here.
assign io_dma_read_resp_bits_bytesRead_0 = _io_dma_read_resp_bits_bytesRead_T_1[15:0]; // @[Mux.scala:126:16]
wire _io_busy_T = _writer_io_busy | _reader_io_busy; // @[Scratchpad.scala:189:26, :192:26, :444:38]
wire _io_busy_T_1 = _io_busy_T | _write_issue_q_io_deq_valid; // @[Scratchpad.scala:254:31, :444:{38,63}]
wire _io_busy_T_2 = _io_busy_T_1 | _write_norm_q_io_deq_valid; // @[Scratchpad.scala:252:30, :444:{63,93}]
wire _io_busy_T_3 = _io_busy_T_2 | _write_scale_q_io_deq_valid; // @[Scratchpad.scala:253:31, :444:{93,122}]
assign _io_busy_T_4 = _io_busy_T_3 | _write_dispatch_q_q_io_deq_valid; // @[Decoupled.scala:362:21]
assign io_busy_0 = _io_busy_T_4; // @[Scratchpad.scala:205:9, :444:152]
assign io_srams_read_0_req_ready_0 = bank_ios_0_read_req_ready; // @[Scratchpad.scala:205:9, :452:29]
wire _bank_ios_0_read_req_valid_T; // @[Scratchpad.scala:468:38]
wire _bank_ios_0_read_resp_ready_T; // @[Scratchpad.scala:499:35]
wire [127:0] dma_read_resp_bits_data = bank_ios_0_read_resp_bits_data; // @[Scratchpad.scala:452:29, :489:33]
wire [127:0] ex_read_resp_bits_data = bank_ios_0_read_resp_bits_data; // @[Scratchpad.scala:452:29, :492:32]
wire dma_read_resp_bits_fromDMA = bank_ios_0_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29, :489:33]
wire ex_read_resp_bits_fromDMA = bank_ios_0_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29, :492:32]
wire _bank_ios_0_write_en_T_1; // @[Scratchpad.scala:532:44]
wire _dmawrite_T_9 = bank_ios_0_write_en; // @[Scratchpad.scala:452:29, :465:26]
assign io_srams_read_1_req_ready_0 = bank_ios_1_read_req_ready; // @[Scratchpad.scala:205:9, :452:29]
wire _bank_ios_1_read_req_valid_T; // @[Scratchpad.scala:468:38]
wire _bank_ios_1_read_resp_ready_T; // @[Scratchpad.scala:499:35]
wire [127:0] dma_read_resp_1_bits_data = bank_ios_1_read_resp_bits_data; // @[Scratchpad.scala:452:29, :489:33]
wire [127:0] ex_read_resp_1_bits_data = bank_ios_1_read_resp_bits_data; // @[Scratchpad.scala:452:29, :492:32]
wire dma_read_resp_1_bits_fromDMA = bank_ios_1_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29, :489:33]
wire ex_read_resp_1_bits_fromDMA = bank_ios_1_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29, :492:32]
wire _bank_ios_1_write_en_T_1; // @[Scratchpad.scala:532:44]
wire _dmawrite_T_25 = bank_ios_1_write_en; // @[Scratchpad.scala:452:29, :465:26]
assign io_srams_read_2_req_ready_0 = bank_ios_2_read_req_ready; // @[Scratchpad.scala:205:9, :452:29]
wire _bank_ios_2_read_req_valid_T; // @[Scratchpad.scala:468:38]
wire _bank_ios_2_read_resp_ready_T; // @[Scratchpad.scala:499:35]
wire [127:0] dma_read_resp_2_bits_data = bank_ios_2_read_resp_bits_data; // @[Scratchpad.scala:452:29, :489:33]
wire [127:0] ex_read_resp_2_bits_data = bank_ios_2_read_resp_bits_data; // @[Scratchpad.scala:452:29, :492:32]
wire dma_read_resp_2_bits_fromDMA = bank_ios_2_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29, :489:33]
wire ex_read_resp_2_bits_fromDMA = bank_ios_2_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29, :492:32]
wire _bank_ios_2_write_en_T_1; // @[Scratchpad.scala:532:44]
wire _dmawrite_T_41 = bank_ios_2_write_en; // @[Scratchpad.scala:452:29, :465:26]
assign io_srams_read_3_req_ready_0 = bank_ios_3_read_req_ready; // @[Scratchpad.scala:205:9, :452:29]
wire _bank_ios_3_read_req_valid_T; // @[Scratchpad.scala:468:38]
wire _bank_ios_3_read_resp_ready_T; // @[Scratchpad.scala:499:35]
wire [127:0] dma_read_resp_3_bits_data = bank_ios_3_read_resp_bits_data; // @[Scratchpad.scala:452:29, :489:33]
wire [127:0] ex_read_resp_3_bits_data = bank_ios_3_read_resp_bits_data; // @[Scratchpad.scala:452:29, :492:32]
wire dma_read_resp_3_bits_fromDMA = bank_ios_3_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29, :489:33]
wire ex_read_resp_3_bits_fromDMA = bank_ios_3_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29, :492:32]
wire _bank_ios_3_write_en_T_1; // @[Scratchpad.scala:532:44]
wire _dmawrite_T_57 = bank_ios_3_write_en; // @[Scratchpad.scala:452:29, :465:26]
wire [11:0] bank_ios_0_read_req_bits_addr; // @[Scratchpad.scala:452:29]
wire bank_ios_0_read_req_bits_fromDMA; // @[Scratchpad.scala:452:29]
wire bank_ios_0_read_req_valid; // @[Scratchpad.scala:452:29]
wire bank_ios_0_read_resp_ready; // @[Scratchpad.scala:452:29]
wire bank_ios_0_read_resp_valid; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_mask_0; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_mask_1; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_mask_2; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_mask_3; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_mask_4; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_mask_5; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_mask_6; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_mask_7; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_mask_8; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_mask_9; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_mask_10; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_mask_11; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_mask_12; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_mask_13; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_mask_14; // @[Scratchpad.scala:452:29]
wire bank_ios_0_write_mask_15; // @[Scratchpad.scala:452:29]
wire [11:0] bank_ios_0_write_addr; // @[Scratchpad.scala:452:29]
wire [127:0] bank_ios_0_write_data; // @[Scratchpad.scala:452:29]
wire [11:0] bank_ios_1_read_req_bits_addr; // @[Scratchpad.scala:452:29]
wire bank_ios_1_read_req_bits_fromDMA; // @[Scratchpad.scala:452:29]
wire bank_ios_1_read_req_valid; // @[Scratchpad.scala:452:29]
wire bank_ios_1_read_resp_ready; // @[Scratchpad.scala:452:29]
wire bank_ios_1_read_resp_valid; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_mask_0; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_mask_1; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_mask_2; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_mask_3; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_mask_4; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_mask_5; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_mask_6; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_mask_7; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_mask_8; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_mask_9; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_mask_10; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_mask_11; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_mask_12; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_mask_13; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_mask_14; // @[Scratchpad.scala:452:29]
wire bank_ios_1_write_mask_15; // @[Scratchpad.scala:452:29]
wire [11:0] bank_ios_1_write_addr; // @[Scratchpad.scala:452:29]
wire [127:0] bank_ios_1_write_data; // @[Scratchpad.scala:452:29]
wire [11:0] bank_ios_2_read_req_bits_addr; // @[Scratchpad.scala:452:29]
wire bank_ios_2_read_req_bits_fromDMA; // @[Scratchpad.scala:452:29]
wire bank_ios_2_read_req_valid; // @[Scratchpad.scala:452:29]
wire bank_ios_2_read_resp_ready; // @[Scratchpad.scala:452:29]
wire bank_ios_2_read_resp_valid; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_mask_0; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_mask_1; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_mask_2; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_mask_3; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_mask_4; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_mask_5; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_mask_6; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_mask_7; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_mask_8; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_mask_9; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_mask_10; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_mask_11; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_mask_12; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_mask_13; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_mask_14; // @[Scratchpad.scala:452:29]
wire bank_ios_2_write_mask_15; // @[Scratchpad.scala:452:29]
wire [11:0] bank_ios_2_write_addr; // @[Scratchpad.scala:452:29]
wire [127:0] bank_ios_2_write_data; // @[Scratchpad.scala:452:29]
wire [11:0] bank_ios_3_read_req_bits_addr; // @[Scratchpad.scala:452:29]
wire bank_ios_3_read_req_bits_fromDMA; // @[Scratchpad.scala:452:29]
wire bank_ios_3_read_req_valid; // @[Scratchpad.scala:452:29]
wire bank_ios_3_read_resp_ready; // @[Scratchpad.scala:452:29]
wire bank_ios_3_read_resp_valid; // @[Scratchpad.scala:452:29]
wire bank_ios_3_write_mask_0; // @[Scratchpad.scala:452:29]
wire bank_ios_3_write_mask_1; // @[Scratchpad.scala:452:29]
wire bank_ios_3_write_mask_2; // @[Scratchpad.scala:452:29]
wire bank_ios_3_write_mask_3; // @[Scratchpad.scala:452:29]
wire bank_ios_3_write_mask_4; // @[Scratchpad.scala:452:29]
wire bank_ios_3_write_mask_5; // @[Scratchpad.scala:452:29]
wire bank_ios_3_write_mask_6; // @[Scratchpad.scala:452:29]
wire bank_ios_3_write_mask_7; // @[Scratchpad.scala:452:29]
wire bank_ios_3_write_mask_8; // @[Scratchpad.scala:452:29]
wire bank_ios_3_write_mask_9; // @[Scratchpad.scala:452:29]
wire bank_ios_3_write_mask_10; // @[Scratchpad.scala:452:29]
wire bank_ios_3_write_mask_11; // @[Scratchpad.scala:452:29]
wire bank_ios_3_write_mask_12; // @[Scratchpad.scala:452:29]
wire bank_ios_3_write_mask_13; // @[Scratchpad.scala:452:29]
wire bank_ios_3_write_mask_14; // @[Scratchpad.scala:452:29]
wire bank_ios_3_write_mask_15; // @[Scratchpad.scala:452:29]
wire [11:0] bank_ios_3_write_addr; // @[Scratchpad.scala:452:29]
wire [127:0] bank_ios_3_write_data; // @[Scratchpad.scala:452:29]
wire _GEN_1 = _write_dispatch_q_q_io_deq_valid & _write_norm_q_io_enq_ready; // @[Decoupled.scala:362:21]
wire _dmawrite_T; // @[Scratchpad.scala:463:47]
assign _dmawrite_T = _GEN_1; // @[Scratchpad.scala:463:47]
wire _dmawrite_T_16; // @[Scratchpad.scala:463:47]
assign _dmawrite_T_16 = _GEN_1; // @[Scratchpad.scala:463:47]
wire _dmawrite_T_32; // @[Scratchpad.scala:463:47]
assign _dmawrite_T_32 = _GEN_1; // @[Scratchpad.scala:463:47]
wire _dmawrite_T_48; // @[Scratchpad.scala:463:47]
assign _dmawrite_T_48 = _GEN_1; // @[Scratchpad.scala:463:47]
wire _dmawrite_T_64; // @[Scratchpad.scala:657:47]
assign _dmawrite_T_64 = _GEN_1; // @[Scratchpad.scala:463:47, :657:47]
wire _dmawrite_T_76; // @[Scratchpad.scala:657:47]
assign _dmawrite_T_76 = _GEN_1; // @[Scratchpad.scala:463:47, :657:47]
// Bank-0 DMA-write qualification (Scratchpad.scala:463-466). `dmawrite` asserts when:
//   (1) the write-dispatch queue has a valid entry AND write_norm_q can accept it
//       (_dmawrite_T, computed above via _GEN_1),
//   (2) the local address is NOT the "garbage" address (the all-ones / full-acc-row
//       pattern checked by _dmawrite_T_2.._T_6, from LocalAddr.scala:43-44; _dmawrite_T_1
//       and _dmawrite_T_5 are driven outside this span),
//   (3) this bank is not already being written this cycle (~write_en via _dmawrite_T_9),
//   (4) the address is a scratchpad — not accumulator — address, and
//   (5) bank-select bits data[13:12] equal 0 (this is bank 0 of 4).
wire _dmawrite_T_2 = _dmawrite_T_1 & _write_dispatch_q_q_io_deq_bits_laddr_read_full_acc_row; // @[Decoupled.scala:362:21]
wire _dmawrite_T_3 = &_write_dispatch_q_q_io_deq_bits_laddr_data; // @[Decoupled.scala:362:21]
wire _dmawrite_T_4 = _dmawrite_T_2 & _dmawrite_T_3; // @[LocalAddr.scala:43:{62,83,91}]
wire _dmawrite_T_5; // @[LocalAddr.scala:44:48]
wire _dmawrite_T_6 = _dmawrite_T_4 & _dmawrite_T_5; // @[LocalAddr.scala:43:{83,96}, :44:48]
wire _dmawrite_T_7 = ~_dmawrite_T_6; // @[Scratchpad.scala:464:11]
wire _dmawrite_T_8 = _dmawrite_T & _dmawrite_T_7; // @[Scratchpad.scala:463:{47,76}, :464:11]
wire _dmawrite_T_10 = ~_dmawrite_T_9; // @[Scratchpad.scala:465:{11,26}]
wire _dmawrite_T_11 = _dmawrite_T_8 & _dmawrite_T_10; // @[Scratchpad.scala:463:76, :464:53, :465:11]
wire _dmawrite_T_12 = ~_write_dispatch_q_q_io_deq_bits_laddr_is_acc_addr; // @[Decoupled.scala:362:21]
wire _dmawrite_T_13 = _dmawrite_T_11 & _dmawrite_T_12; // @[Scratchpad.scala:464:53, :465:55, :466:11]
// Bank-select slices: the same laddr bits [13:12] are re-sliced once per bank
// (T_14/T_30/T_46/T_62 for banks 0-3) by the generated code.
wire [1:0] _dmawrite_T_14 = _write_dispatch_q_q_io_deq_bits_laddr_data[13:12]; // @[Decoupled.scala:362:21]
wire [1:0] _dmawrite_T_30 = _write_dispatch_q_q_io_deq_bits_laddr_data[13:12]; // @[Decoupled.scala:362:21]
wire [1:0] _dmawrite_T_46 = _write_dispatch_q_q_io_deq_bits_laddr_data[13:12]; // @[Decoupled.scala:362:21]
wire [1:0] _dmawrite_T_62 = _write_dispatch_q_q_io_deq_bits_laddr_data[13:12]; // @[Decoupled.scala:362:21]
wire _dmawrite_T_15 = _dmawrite_T_14 == 2'h0; // @[Scratchpad.scala:466:93]
wire dmawrite = _dmawrite_T_13 & _dmawrite_T_15; // @[Scratchpad.scala:465:55, :466:{52,93}]
// Bank-0 read-request arbitration: the execute-unit request (io_srams_read_0) has
// strict priority over the DMA writeback read (`dmawrite`). `fromDMA` tags the
// request so the response can be routed back to the right consumer.
assign _bank_ios_0_read_req_valid_T = io_srams_read_0_req_valid_0 | dmawrite; // @[Scratchpad.scala:205:9, :466:52, :468:38]
assign bank_ios_0_read_req_valid = _bank_ios_0_read_req_valid_T; // @[Scratchpad.scala:452:29, :468:38]
// DMA-side read address for each bank: low 12 bits of the dispatched laddr.
wire [11:0] _bank_ios_0_read_req_bits_addr_T = _write_dispatch_q_q_io_deq_bits_laddr_data[11:0]; // @[Decoupled.scala:362:21]
wire [11:0] _bank_ios_1_read_req_bits_addr_T = _write_dispatch_q_q_io_deq_bits_laddr_data[11:0]; // @[Decoupled.scala:362:21]
wire [11:0] _bank_ios_2_read_req_bits_addr_T = _write_dispatch_q_q_io_deq_bits_laddr_data[11:0]; // @[Decoupled.scala:362:21]
wire [11:0] _bank_ios_3_read_req_bits_addr_T = _write_dispatch_q_q_io_deq_bits_laddr_data[11:0]; // @[Decoupled.scala:362:21]
// _GEN_2: the DMA read request actually fired this cycle (no ex request present,
// DMA wanted the bank, and the ready/valid handshake completed).
wire _GEN_2 = ~io_srams_read_0_req_valid_0 & dmawrite & bank_ios_0_read_req_ready & bank_ios_0_read_req_valid; // @[Decoupled.scala:51:35]
assign bank_ios_0_read_req_bits_fromDMA = ~io_srams_read_0_req_valid_0; // @[Scratchpad.scala:205:9, :452:29, :472:23, :474:37, :475:31]
assign bank_ios_0_read_req_bits_addr = io_srams_read_0_req_valid_0 ? io_srams_read_0_req_bits_addr_0 : _bank_ios_0_read_req_bits_addr_T; // @[Scratchpad.scala:205:9, :452:29, :472:23, :473:34, :475:31]
// Bank-0 read-response demux: the `fromDMA` bit recorded at request time steers
// valid to exactly one consumer (dma_read_resp vs ex_read_resp) and selects
// whose `ready` back-pressures the bank.
wire _dma_read_resp_valid_T; // @[Scratchpad.scala:490:52]
wire dma_read_resp_ready; // @[Scratchpad.scala:489:33]
wire dma_read_resp_valid; // @[Scratchpad.scala:489:33]
assign _dma_read_resp_valid_T = bank_ios_0_read_resp_valid & bank_ios_0_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29, :490:52]
assign dma_read_resp_valid = _dma_read_resp_valid_T; // @[Scratchpad.scala:489:33, :490:52]
wire _ex_read_resp_valid_T_1; // @[Scratchpad.scala:493:51]
wire ex_read_resp_ready; // @[Scratchpad.scala:492:32]
wire ex_read_resp_valid; // @[Scratchpad.scala:492:32]
wire _ex_read_resp_valid_T = ~bank_ios_0_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29, :493:54]
assign _ex_read_resp_valid_T_1 = bank_ios_0_read_resp_valid & _ex_read_resp_valid_T; // @[Scratchpad.scala:452:29, :493:{51,54}]
assign ex_read_resp_valid = _ex_read_resp_valid_T_1; // @[Scratchpad.scala:492:32, :493:51]
assign _bank_ios_0_read_resp_ready_T = bank_ios_0_read_resp_bits_fromDMA ? dma_read_resp_ready : ex_read_resp_ready; // @[Scratchpad.scala:452:29, :489:33, :492:32, :499:35]
assign bank_ios_0_read_resp_ready = _bank_ios_0_read_resp_ready_T; // @[Scratchpad.scala:452:29, :499:35]
wire _p_io_out_ready_T = ~_write_issue_q_io_deq_bits_laddr_is_acc_addr; // @[Scratchpad.scala:254:31, :502:11]
wire _p_io_out_ready_T_1 = _writer_io_req_ready & _p_io_out_ready_T; // @[Scratchpad.scala:192:26, :501:59, :502:11]
wire [1:0] _p_io_out_ready_T_2 = _write_issue_q_io_deq_bits_laddr_data[13:12]; // @[Scratchpad.scala:254:31]
wire [1:0] _p_io_out_ready_T_15 = _write_issue_q_io_deq_bits_laddr_data[13:12]; // @[Scratchpad.scala:254:31]
wire [1:0] _p_io_out_ready_T_28 = _write_issue_q_io_deq_bits_laddr_data[13:12]; // @[Scratchpad.scala:254:31]
wire [1:0] _p_io_out_ready_T_41 = _write_issue_q_io_deq_bits_laddr_data[13:12]; // @[Scratchpad.scala:254:31]
wire _p_io_out_ready_T_3 = _p_io_out_ready_T_2 == 2'h0; // @[Scratchpad.scala:502:101]
wire _p_io_out_ready_T_4 = _p_io_out_ready_T_1 & _p_io_out_ready_T_3; // @[Scratchpad.scala:501:59, :502:{56,101}]
wire _p_io_out_ready_T_6 = _p_io_out_ready_T_5 & _write_issue_q_io_deq_bits_laddr_read_full_acc_row; // @[Scratchpad.scala:254:31]
wire _p_io_out_ready_T_7 = &_write_issue_q_io_deq_bits_laddr_data; // @[Scratchpad.scala:254:31]
wire _p_io_out_ready_T_8 = _p_io_out_ready_T_6 & _p_io_out_ready_T_7; // @[LocalAddr.scala:43:{62,83,91}]
wire _p_io_out_ready_T_9; // @[LocalAddr.scala:44:48]
wire _p_io_out_ready_T_10 = _p_io_out_ready_T_8 & _p_io_out_ready_T_9; // @[LocalAddr.scala:43:{83,96}, :44:48]
wire _p_io_out_ready_T_11 = ~_p_io_out_ready_T_10; // @[Scratchpad.scala:503:11]
wire _p_io_out_ready_T_12 = _p_io_out_ready_T_4 & _p_io_out_ready_T_11; // @[Scratchpad.scala:502:{56,109}, :503:11]
wire _dmawrite_T_18 = _dmawrite_T_17 & _write_dispatch_q_q_io_deq_bits_laddr_read_full_acc_row; // @[Decoupled.scala:362:21]
wire _dmawrite_T_19 = &_write_dispatch_q_q_io_deq_bits_laddr_data; // @[Decoupled.scala:362:21]
wire _dmawrite_T_20 = _dmawrite_T_18 & _dmawrite_T_19; // @[LocalAddr.scala:43:{62,83,91}]
wire _dmawrite_T_21; // @[LocalAddr.scala:44:48]
wire _dmawrite_T_22 = _dmawrite_T_20 & _dmawrite_T_21; // @[LocalAddr.scala:43:{83,96}, :44:48]
wire _dmawrite_T_23 = ~_dmawrite_T_22; // @[Scratchpad.scala:464:11]
wire _dmawrite_T_24 = _dmawrite_T_16 & _dmawrite_T_23; // @[Scratchpad.scala:463:{47,76}, :464:11]
wire _dmawrite_T_26 = ~_dmawrite_T_25; // @[Scratchpad.scala:465:{11,26}]
wire _dmawrite_T_27 = _dmawrite_T_24 & _dmawrite_T_26; // @[Scratchpad.scala:463:76, :464:53, :465:11]
wire _dmawrite_T_28 = ~_write_dispatch_q_q_io_deq_bits_laddr_is_acc_addr; // @[Decoupled.scala:362:21]
wire _dmawrite_T_29 = _dmawrite_T_27 & _dmawrite_T_28; // @[Scratchpad.scala:464:53, :465:55, :466:11]
wire _dmawrite_T_31 = _dmawrite_T_30 == 2'h1; // @[Scratchpad.scala:466:93]
wire dmawrite_1 = _dmawrite_T_29 & _dmawrite_T_31; // @[Scratchpad.scala:465:55, :466:{52,93}]
assign _bank_ios_1_read_req_valid_T = io_srams_read_1_req_valid_0 | dmawrite_1; // @[Scratchpad.scala:205:9, :466:52, :468:38]
assign bank_ios_1_read_req_valid = _bank_ios_1_read_req_valid_T; // @[Scratchpad.scala:452:29, :468:38]
wire _GEN_3 = ~io_srams_read_1_req_valid_0 & dmawrite_1 & bank_ios_1_read_req_ready & bank_ios_1_read_req_valid; // @[Decoupled.scala:51:35]
assign bank_ios_1_read_req_bits_fromDMA = ~io_srams_read_1_req_valid_0; // @[Scratchpad.scala:205:9, :452:29, :472:23, :474:37, :475:31]
assign bank_ios_1_read_req_bits_addr = io_srams_read_1_req_valid_0 ? io_srams_read_1_req_bits_addr_0 : _bank_ios_1_read_req_bits_addr_T; // @[Scratchpad.scala:205:9, :452:29, :472:23, :473:34, :475:31]
// --------------------------------------------------------------------------
// SRAM bank 1: read-response demultiplexing.
// A response whose fromDMA tag is set is routed to the DMA read-response
// channel (dma_read_resp_1); otherwise it goes to the execution-side channel
// (ex_read_resp_1). Exactly one consumer sees valid per response, and the
// bank's resp.ready mirrors the ready of the consumer fromDMA selects.
// --------------------------------------------------------------------------
wire _dma_read_resp_valid_T_1; // @[Scratchpad.scala:490:52]
wire dma_read_resp_1_ready; // @[Scratchpad.scala:489:33]
wire dma_read_resp_1_valid; // @[Scratchpad.scala:489:33]
assign _dma_read_resp_valid_T_1 = bank_ios_1_read_resp_valid & bank_ios_1_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29, :490:52]
assign dma_read_resp_1_valid = _dma_read_resp_valid_T_1; // @[Scratchpad.scala:489:33, :490:52]
wire _ex_read_resp_valid_T_3; // @[Scratchpad.scala:493:51]
wire ex_read_resp_1_ready; // @[Scratchpad.scala:492:32]
wire ex_read_resp_1_valid; // @[Scratchpad.scala:492:32]
wire _ex_read_resp_valid_T_2 = ~bank_ios_1_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29, :493:54]
assign _ex_read_resp_valid_T_3 = bank_ios_1_read_resp_valid & _ex_read_resp_valid_T_2; // @[Scratchpad.scala:452:29, :493:{51,54}]
assign ex_read_resp_1_valid = _ex_read_resp_valid_T_3; // @[Scratchpad.scala:492:32, :493:51]
// resp.ready follows whichever consumer the fromDMA bit steers this beat to.
assign _bank_ios_1_read_resp_ready_T = bank_ios_1_read_resp_bits_fromDMA ? dma_read_resp_1_ready : ex_read_resp_1_ready; // @[Scratchpad.scala:452:29, :489:33, :492:32, :499:35]
assign bank_ios_1_read_resp_ready = _bank_ios_1_read_resp_ready_T; // @[Scratchpad.scala:452:29, :499:35]
// Bank 1 DMA read pipe drain qualifier: the pipe output may advance only when
// the DMA writer can accept a request (_writer_io_req_ready), the write at the
// head of write_issue_q targets scratchpad (not accumulator) bank 1
// (laddr bank bits == 2'h1), and the issued address is not the
// "garbage" sentinel (read_full_acc_row & all-ones data & T_22 — built from
// LocalAddr.scala; NOTE(review): this looks like LocalAddr.is_garbage(),
// confirm against LocalAddr.scala:43-44).
wire _p_io_out_ready_T_13 = ~_write_issue_q_io_deq_bits_laddr_is_acc_addr; // @[Scratchpad.scala:254:31, :502:11]
wire _p_io_out_ready_T_14 = _writer_io_req_ready & _p_io_out_ready_T_13; // @[Scratchpad.scala:192:26, :501:59, :502:11]
wire _p_io_out_ready_T_16 = _p_io_out_ready_T_15 == 2'h1; // @[Scratchpad.scala:502:101]
wire _p_io_out_ready_T_17 = _p_io_out_ready_T_14 & _p_io_out_ready_T_16; // @[Scratchpad.scala:501:59, :502:{56,101}]
wire _p_io_out_ready_T_19 = _p_io_out_ready_T_18 & _write_issue_q_io_deq_bits_laddr_read_full_acc_row; // @[Scratchpad.scala:254:31]
wire _p_io_out_ready_T_20 = &_write_issue_q_io_deq_bits_laddr_data; // @[Scratchpad.scala:254:31]
wire _p_io_out_ready_T_21 = _p_io_out_ready_T_19 & _p_io_out_ready_T_20; // @[LocalAddr.scala:43:{62,83,91}]
wire _p_io_out_ready_T_22; // @[LocalAddr.scala:44:48]
wire _p_io_out_ready_T_23 = _p_io_out_ready_T_21 & _p_io_out_ready_T_22; // @[LocalAddr.scala:43:{83,96}, :44:48]
wire _p_io_out_ready_T_24 = ~_p_io_out_ready_T_23; // @[Scratchpad.scala:503:11]
wire _p_io_out_ready_T_25 = _p_io_out_ready_T_17 & _p_io_out_ready_T_24; // @[Scratchpad.scala:502:{56,109}, :503:11]
// Fire pulse: ready qualifier above AND the bank-1 DMA read pipe has data.
wire _T_97 = _p_io_out_ready_T_25 & _dma_read_pipe_p_1_io_out_valid; // @[Decoupled.scala:51:35]
// --------------------------------------------------------------------------
// SRAM bank 2: DMA-side read-request generation ("dmawrite" = a scratchpad
// read issued on behalf of an mvout/DMA write), read-request arbitration,
// read-response demux, and DMA read pipe drain qualifier.
// dmawrite_2 asserts when the head of write_dispatch_q is a non-garbage,
// non-accumulator address whose bank bits select bank 2 (2'h2), gated by
// the _dmawrite_T_4x qualifiers computed earlier in the file.
// --------------------------------------------------------------------------
wire _dmawrite_T_34 = _dmawrite_T_33 & _write_dispatch_q_q_io_deq_bits_laddr_read_full_acc_row; // @[Decoupled.scala:362:21]
wire _dmawrite_T_35 = &_write_dispatch_q_q_io_deq_bits_laddr_data; // @[Decoupled.scala:362:21]
wire _dmawrite_T_36 = _dmawrite_T_34 & _dmawrite_T_35; // @[LocalAddr.scala:43:{62,83,91}]
wire _dmawrite_T_37; // @[LocalAddr.scala:44:48]
wire _dmawrite_T_38 = _dmawrite_T_36 & _dmawrite_T_37; // @[LocalAddr.scala:43:{83,96}, :44:48]
// _dmawrite_T_39: address is NOT the garbage sentinel (see LocalAddr.scala).
wire _dmawrite_T_39 = ~_dmawrite_T_38; // @[Scratchpad.scala:464:11]
wire _dmawrite_T_40 = _dmawrite_T_32 & _dmawrite_T_39; // @[Scratchpad.scala:463:{47,76}, :464:11]
wire _dmawrite_T_42 = ~_dmawrite_T_41; // @[Scratchpad.scala:465:{11,26}]
wire _dmawrite_T_43 = _dmawrite_T_40 & _dmawrite_T_42; // @[Scratchpad.scala:463:76, :464:53, :465:11]
wire _dmawrite_T_44 = ~_write_dispatch_q_q_io_deq_bits_laddr_is_acc_addr; // @[Decoupled.scala:362:21]
wire _dmawrite_T_45 = _dmawrite_T_43 & _dmawrite_T_44; // @[Scratchpad.scala:464:53, :465:55, :466:11]
wire _dmawrite_T_47 = _dmawrite_T_46 == 2'h2; // @[Scratchpad.scala:466:93]
wire dmawrite_2 = _dmawrite_T_45 & _dmawrite_T_47; // @[Scratchpad.scala:465:55, :466:{52,93}]
// Read-request arbitration: the execution unit (io_srams_read_2) has strict
// priority over the DMA-generated request; fromDMA marks who won.
assign _bank_ios_2_read_req_valid_T = io_srams_read_2_req_valid_0 | dmawrite_2; // @[Scratchpad.scala:205:9, :466:52, :468:38]
assign bank_ios_2_read_req_valid = _bank_ios_2_read_req_valid_T; // @[Scratchpad.scala:452:29, :468:38]
// _GEN_4: pulse when the DMA-side request (not the ex request) actually fires.
wire _GEN_4 = ~io_srams_read_2_req_valid_0 & dmawrite_2 & bank_ios_2_read_req_ready & bank_ios_2_read_req_valid; // @[Decoupled.scala:51:35]
assign bank_ios_2_read_req_bits_fromDMA = ~io_srams_read_2_req_valid_0; // @[Scratchpad.scala:205:9, :452:29, :472:23, :474:37, :475:31]
assign bank_ios_2_read_req_bits_addr = io_srams_read_2_req_valid_0 ? io_srams_read_2_req_bits_addr_0 : _bank_ios_2_read_req_bits_addr_T; // @[Scratchpad.scala:205:9, :452:29, :472:23, :473:34, :475:31]
// Read-response demux (same scheme as bank 1): fromDMA steers the response
// to dma_read_resp_2 or ex_read_resp_2, and resp.ready follows that choice.
wire _dma_read_resp_valid_T_2; // @[Scratchpad.scala:490:52]
wire dma_read_resp_2_ready; // @[Scratchpad.scala:489:33]
wire dma_read_resp_2_valid; // @[Scratchpad.scala:489:33]
assign _dma_read_resp_valid_T_2 = bank_ios_2_read_resp_valid & bank_ios_2_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29, :490:52]
assign dma_read_resp_2_valid = _dma_read_resp_valid_T_2; // @[Scratchpad.scala:489:33, :490:52]
wire _ex_read_resp_valid_T_5; // @[Scratchpad.scala:493:51]
wire ex_read_resp_2_ready; // @[Scratchpad.scala:492:32]
wire ex_read_resp_2_valid; // @[Scratchpad.scala:492:32]
wire _ex_read_resp_valid_T_4 = ~bank_ios_2_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29, :493:54]
assign _ex_read_resp_valid_T_5 = bank_ios_2_read_resp_valid & _ex_read_resp_valid_T_4; // @[Scratchpad.scala:452:29, :493:{51,54}]
assign ex_read_resp_2_valid = _ex_read_resp_valid_T_5; // @[Scratchpad.scala:492:32, :493:51]
assign _bank_ios_2_read_resp_ready_T = bank_ios_2_read_resp_bits_fromDMA ? dma_read_resp_2_ready : ex_read_resp_2_ready; // @[Scratchpad.scala:452:29, :489:33, :492:32, :499:35]
assign bank_ios_2_read_resp_ready = _bank_ios_2_read_resp_ready_T; // @[Scratchpad.scala:452:29, :499:35]
// Bank 2 DMA read pipe drain qualifier (mirrors bank 1, bank bits == 2'h2):
// writer ready, scratchpad (non-acc) target, non-garbage address.
wire _p_io_out_ready_T_26 = ~_write_issue_q_io_deq_bits_laddr_is_acc_addr; // @[Scratchpad.scala:254:31, :502:11]
wire _p_io_out_ready_T_27 = _writer_io_req_ready & _p_io_out_ready_T_26; // @[Scratchpad.scala:192:26, :501:59, :502:11]
wire _p_io_out_ready_T_29 = _p_io_out_ready_T_28 == 2'h2; // @[Scratchpad.scala:502:101]
wire _p_io_out_ready_T_30 = _p_io_out_ready_T_27 & _p_io_out_ready_T_29; // @[Scratchpad.scala:501:59, :502:{56,101}]
wire _p_io_out_ready_T_32 = _p_io_out_ready_T_31 & _write_issue_q_io_deq_bits_laddr_read_full_acc_row; // @[Scratchpad.scala:254:31]
wire _p_io_out_ready_T_33 = &_write_issue_q_io_deq_bits_laddr_data; // @[Scratchpad.scala:254:31]
wire _p_io_out_ready_T_34 = _p_io_out_ready_T_32 & _p_io_out_ready_T_33; // @[LocalAddr.scala:43:{62,83,91}]
wire _p_io_out_ready_T_35; // @[LocalAddr.scala:44:48]
wire _p_io_out_ready_T_36 = _p_io_out_ready_T_34 & _p_io_out_ready_T_35; // @[LocalAddr.scala:43:{83,96}, :44:48]
wire _p_io_out_ready_T_37 = ~_p_io_out_ready_T_36; // @[Scratchpad.scala:503:11]
wire _p_io_out_ready_T_38 = _p_io_out_ready_T_30 & _p_io_out_ready_T_37; // @[Scratchpad.scala:502:{56,109}, :503:11]
wire _T_99 = _p_io_out_ready_T_38 & _dma_read_pipe_p_2_io_out_valid; // @[Decoupled.scala:51:35]
// --------------------------------------------------------------------------
// SRAM bank 3: same read path as bank 2 above — DMA-side request generation
// (dmawrite_3), ex-over-DMA request arbitration, fromDMA response demux, and
// the DMA read pipe drain qualifier. Bank 3 is selected with a reduction-AND
// over the 2-bit bank field (&bits, i.e. bits == 2'h3).
// --------------------------------------------------------------------------
wire _dmawrite_T_50 = _dmawrite_T_49 & _write_dispatch_q_q_io_deq_bits_laddr_read_full_acc_row; // @[Decoupled.scala:362:21]
wire _dmawrite_T_51 = &_write_dispatch_q_q_io_deq_bits_laddr_data; // @[Decoupled.scala:362:21]
wire _dmawrite_T_52 = _dmawrite_T_50 & _dmawrite_T_51; // @[LocalAddr.scala:43:{62,83,91}]
wire _dmawrite_T_53; // @[LocalAddr.scala:44:48]
wire _dmawrite_T_54 = _dmawrite_T_52 & _dmawrite_T_53; // @[LocalAddr.scala:43:{83,96}, :44:48]
wire _dmawrite_T_55 = ~_dmawrite_T_54; // @[Scratchpad.scala:464:11]
wire _dmawrite_T_56 = _dmawrite_T_48 & _dmawrite_T_55; // @[Scratchpad.scala:463:{47,76}, :464:11]
wire _dmawrite_T_58 = ~_dmawrite_T_57; // @[Scratchpad.scala:465:{11,26}]
wire _dmawrite_T_59 = _dmawrite_T_56 & _dmawrite_T_58; // @[Scratchpad.scala:463:76, :464:53, :465:11]
wire _dmawrite_T_60 = ~_write_dispatch_q_q_io_deq_bits_laddr_is_acc_addr; // @[Decoupled.scala:362:21]
wire _dmawrite_T_61 = _dmawrite_T_59 & _dmawrite_T_60; // @[Scratchpad.scala:464:53, :465:55, :466:11]
wire _dmawrite_T_63 = &_dmawrite_T_62; // @[Scratchpad.scala:466:93]
wire dmawrite_3 = _dmawrite_T_61 & _dmawrite_T_63; // @[Scratchpad.scala:465:55, :466:{52,93}]
// Execution-unit read request wins over the DMA-generated one.
assign _bank_ios_3_read_req_valid_T = io_srams_read_3_req_valid_0 | dmawrite_3; // @[Scratchpad.scala:205:9, :466:52, :468:38]
assign bank_ios_3_read_req_valid = _bank_ios_3_read_req_valid_T; // @[Scratchpad.scala:452:29, :468:38]
// _GEN_5: pulse when the DMA-side request is the one that fires.
wire _GEN_5 = ~io_srams_read_3_req_valid_0 & dmawrite_3 & bank_ios_3_read_req_ready & bank_ios_3_read_req_valid; // @[Decoupled.scala:51:35]
assign bank_ios_3_read_req_bits_fromDMA = ~io_srams_read_3_req_valid_0; // @[Scratchpad.scala:205:9, :452:29, :472:23, :474:37, :475:31]
assign bank_ios_3_read_req_bits_addr = io_srams_read_3_req_valid_0 ? io_srams_read_3_req_bits_addr_0 : _bank_ios_3_read_req_bits_addr_T; // @[Scratchpad.scala:205:9, :452:29, :472:23, :473:34, :475:31]
// fromDMA-steered response demux, as for banks 1 and 2.
wire _dma_read_resp_valid_T_3; // @[Scratchpad.scala:490:52]
wire dma_read_resp_3_ready; // @[Scratchpad.scala:489:33]
wire dma_read_resp_3_valid; // @[Scratchpad.scala:489:33]
assign _dma_read_resp_valid_T_3 = bank_ios_3_read_resp_valid & bank_ios_3_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29, :490:52]
assign dma_read_resp_3_valid = _dma_read_resp_valid_T_3; // @[Scratchpad.scala:489:33, :490:52]
wire _ex_read_resp_valid_T_7; // @[Scratchpad.scala:493:51]
wire ex_read_resp_3_ready; // @[Scratchpad.scala:492:32]
wire ex_read_resp_3_valid; // @[Scratchpad.scala:492:32]
wire _ex_read_resp_valid_T_6 = ~bank_ios_3_read_resp_bits_fromDMA; // @[Scratchpad.scala:452:29, :493:54]
assign _ex_read_resp_valid_T_7 = bank_ios_3_read_resp_valid & _ex_read_resp_valid_T_6; // @[Scratchpad.scala:452:29, :493:{51,54}]
assign ex_read_resp_3_valid = _ex_read_resp_valid_T_7; // @[Scratchpad.scala:492:32, :493:51]
assign _bank_ios_3_read_resp_ready_T = bank_ios_3_read_resp_bits_fromDMA ? dma_read_resp_3_ready : ex_read_resp_3_ready; // @[Scratchpad.scala:452:29, :489:33, :492:32, :499:35]
assign bank_ios_3_read_resp_ready = _bank_ios_3_read_resp_ready_T; // @[Scratchpad.scala:452:29, :499:35]
// Bank 3 DMA read pipe drain qualifier (bank field all-ones == 2'h3).
wire _p_io_out_ready_T_39 = ~_write_issue_q_io_deq_bits_laddr_is_acc_addr; // @[Scratchpad.scala:254:31, :502:11]
wire _p_io_out_ready_T_40 = _writer_io_req_ready & _p_io_out_ready_T_39; // @[Scratchpad.scala:192:26, :501:59, :502:11]
wire _p_io_out_ready_T_42 = &_p_io_out_ready_T_41; // @[Scratchpad.scala:502:101]
wire _p_io_out_ready_T_43 = _p_io_out_ready_T_40 & _p_io_out_ready_T_42; // @[Scratchpad.scala:501:59, :502:{56,101}]
wire _p_io_out_ready_T_45 = _p_io_out_ready_T_44 & _write_issue_q_io_deq_bits_laddr_read_full_acc_row; // @[Scratchpad.scala:254:31]
wire _p_io_out_ready_T_46 = &_write_issue_q_io_deq_bits_laddr_data; // @[Scratchpad.scala:254:31]
wire _p_io_out_ready_T_47 = _p_io_out_ready_T_45 & _p_io_out_ready_T_46; // @[LocalAddr.scala:43:{62,83,91}]
wire _p_io_out_ready_T_48; // @[LocalAddr.scala:44:48]
wire _p_io_out_ready_T_49 = _p_io_out_ready_T_47 & _p_io_out_ready_T_48; // @[LocalAddr.scala:43:{83,96}, :44:48]
wire _p_io_out_ready_T_50 = ~_p_io_out_ready_T_49; // @[Scratchpad.scala:503:11]
wire _p_io_out_ready_T_51 = _p_io_out_ready_T_43 & _p_io_out_ready_T_50; // @[Scratchpad.scala:502:{56,109}, :503:11]
wire _T_101 = _p_io_out_ready_T_51 & _dma_read_pipe_p_3_io_out_valid; // @[Decoupled.scala:51:35]
// --------------------------------------------------------------------------
// Write-side qualifiers shared by the per-bank write muxes below.
//  * dmaread   — an mvin (DMA load) beat from the mvin-scale pixel repeater
//                that targets the scratchpad (not the accumulator) and whose
//                laddr bank bits [13:12] select this bank.
//  * zerowrite — a zero-writer beat for this bank, suppressed whenever a
//                "last" beat from either the mvin-scale repeater or vsm_1 is
//                concurrently valid (those take the write port).
// The four [13:12] slices per source are the per-bank copies of the same
// bank-select field.
// --------------------------------------------------------------------------
wire _dmaread_T = ~_mvin_scale_pixel_repeater_io_resp_bits_tag_is_acc; // @[Scratchpad.scala:387:43, :520:66]
wire _dmaread_T_1 = _mvin_scale_pixel_repeater_io_resp_valid & _dmaread_T; // @[Scratchpad.scala:387:43, :520:{63,66}]
wire [1:0] _dmaread_T_2 = _mvin_scale_pixel_repeater_io_resp_bits_laddr_data[13:12]; // @[Scratchpad.scala:387:43]
wire [1:0] _dmaread_T_6 = _mvin_scale_pixel_repeater_io_resp_bits_laddr_data[13:12]; // @[Scratchpad.scala:387:43]
wire [1:0] _dmaread_T_10 = _mvin_scale_pixel_repeater_io_resp_bits_laddr_data[13:12]; // @[Scratchpad.scala:387:43]
wire [1:0] _dmaread_T_14 = _mvin_scale_pixel_repeater_io_resp_bits_laddr_data[13:12]; // @[Scratchpad.scala:387:43]
// Bank 0 variant: bank field == 2'h0.
wire _dmaread_T_3 = _dmaread_T_2 == 2'h0; // @[Scratchpad.scala:521:27]
wire dmaread = _dmaread_T_1 & _dmaread_T_3; // @[Scratchpad.scala:520:{63,117}, :521:27]
wire _zerowrite_T = ~_zero_writer_pixel_repeater_io_resp_bits_laddr_is_acc_addr; // @[Scratchpad.scala:330:44, :527:69]
wire _zerowrite_T_1 = _zero_writer_pixel_repeater_io_resp_valid & _zerowrite_T; // @[Scratchpad.scala:330:44, :527:{66,69}]
wire [1:0] _zerowrite_T_2 = _zero_writer_pixel_repeater_io_resp_bits_laddr_data[13:12]; // @[Scratchpad.scala:330:44]
wire [1:0] _zerowrite_T_11 = _zero_writer_pixel_repeater_io_resp_bits_laddr_data[13:12]; // @[Scratchpad.scala:330:44]
wire [1:0] _zerowrite_T_20 = _zero_writer_pixel_repeater_io_resp_bits_laddr_data[13:12]; // @[Scratchpad.scala:330:44]
wire [1:0] _zerowrite_T_29 = _zero_writer_pixel_repeater_io_resp_bits_laddr_data[13:12]; // @[Scratchpad.scala:330:44]
wire _zerowrite_T_3 = _zerowrite_T_2 == 2'h0; // @[Scratchpad.scala:528:67]
wire _zerowrite_T_4 = _zerowrite_T_1 & _zerowrite_T_3; // @[Scratchpad.scala:527:{66,128}, :528:67]
// _GEN_6: mvin-scale repeater presents a valid *last* beat.  Fanned out to
// every bank's zerowrite suppression and to the spad_last/T_4x copies used
// further down the file (lines 740/753 in the Chisel source).
wire _GEN_6 = _mvin_scale_pixel_repeater_io_resp_valid & _mvin_scale_pixel_repeater_io_resp_bits_last; // @[Scratchpad.scala:387:43, :530:54]
wire _zerowrite_T_5; // @[Scratchpad.scala:530:54]
assign _zerowrite_T_5 = _GEN_6; // @[Scratchpad.scala:530:54]
wire _zerowrite_T_14; // @[Scratchpad.scala:530:54]
assign _zerowrite_T_14 = _GEN_6; // @[Scratchpad.scala:530:54]
wire _zerowrite_T_23; // @[Scratchpad.scala:530:54]
assign _zerowrite_T_23 = _GEN_6; // @[Scratchpad.scala:530:54]
wire _zerowrite_T_32; // @[Scratchpad.scala:530:54]
assign _zerowrite_T_32 = _GEN_6; // @[Scratchpad.scala:530:54]
wire _spad_last_T; // @[Scratchpad.scala:740:65]
assign _spad_last_T = _GEN_6; // @[Scratchpad.scala:530:54, :740:65]
wire _zerowrite_T_40; // @[Scratchpad.scala:753:54]
assign _zerowrite_T_40 = _GEN_6; // @[Scratchpad.scala:530:54, :753:54]
wire _spad_last_T_2; // @[Scratchpad.scala:740:65]
assign _spad_last_T_2 = _GEN_6; // @[Scratchpad.scala:530:54, :740:65]
wire _zerowrite_T_48; // @[Scratchpad.scala:753:54]
assign _zerowrite_T_48 = _GEN_6; // @[Scratchpad.scala:530:54, :753:54]
// _GEN_7: vsm_1 presents a valid *last* beat; fanned out like _GEN_6.
wire _GEN_7 = _vsm_1_io_resp_valid & _vsm_1_io_resp_bits_last; // @[VectorScalarMultiplier.scala:200:21]
wire _zerowrite_T_6; // @[Scratchpad.scala:530:131]
assign _zerowrite_T_6 = _GEN_7; // @[Scratchpad.scala:530:131]
wire _zerowrite_T_15; // @[Scratchpad.scala:530:131]
assign _zerowrite_T_15 = _GEN_7; // @[Scratchpad.scala:530:131]
wire _zerowrite_T_24; // @[Scratchpad.scala:530:131]
assign _zerowrite_T_24 = _GEN_7; // @[Scratchpad.scala:530:131]
wire _zerowrite_T_33; // @[Scratchpad.scala:530:131]
assign _zerowrite_T_33 = _GEN_7; // @[Scratchpad.scala:530:131]
wire _zerowrite_T_41; // @[Scratchpad.scala:753:131]
assign _zerowrite_T_41 = _GEN_7; // @[Scratchpad.scala:530:131, :753:131]
wire _zerowrite_T_49; // @[Scratchpad.scala:753:131]
assign _zerowrite_T_49 = _GEN_7; // @[Scratchpad.scala:530:131, :753:131]
// Bank 0 zerowrite: zero-writer beat for bank 0, and neither source is
// finishing a transfer this cycle.
wire _zerowrite_T_7 = _zerowrite_T_5 | _zerowrite_T_6; // @[Scratchpad.scala:530:{54,102,131}]
wire _zerowrite_T_8 = ~_zerowrite_T_7; // @[Scratchpad.scala:530:{11,102}]
wire zerowrite = _zerowrite_T_4 & _zerowrite_T_8; // @[Scratchpad.scala:527:128, :528:75, :530:11]
// --------------------------------------------------------------------------
// SRAM bank 0 write-port mux.  Three sources, in strict priority order:
//   1. execution-unit write (io_srams_write_0_*),
//   2. DMA mvin data from the mvin-scale pixel repeater (dmaread),
//   3. zero-writer (writes 128'h0).
// The 128-bit write word is assembled from sixteen 8-bit lanes
// (_bank_ios_0_write_data_T_0..15) concatenated low-to-high; the sixteen
// per-lane mask bits are muxed from the selected source.
// --------------------------------------------------------------------------
wire _bank_ios_0_write_en_T = io_srams_write_0_en_0 | dmaread; // @[Scratchpad.scala:205:9, :520:117, :532:33]
assign _bank_ios_0_write_en_T_1 = _bank_ios_0_write_en_T | zerowrite; // @[Scratchpad.scala:528:75, :532:{33,44}]
assign bank_ios_0_write_en = _bank_ios_0_write_en_T_1; // @[Scratchpad.scala:452:29, :532:44]
// Per-bank copies of the mvin-scale row address (laddr data bits [11:0]).
wire [11:0] _bank_ios_0_write_addr_T = _mvin_scale_pixel_repeater_io_resp_bits_laddr_data[11:0]; // @[Scratchpad.scala:387:43]
wire [11:0] _bank_ios_1_write_addr_T = _mvin_scale_pixel_repeater_io_resp_bits_laddr_data[11:0]; // @[Scratchpad.scala:387:43]
wire [11:0] _bank_ios_2_write_addr_T = _mvin_scale_pixel_repeater_io_resp_bits_laddr_data[11:0]; // @[Scratchpad.scala:387:43]
wire [11:0] _bank_ios_3_write_addr_T = _mvin_scale_pixel_repeater_io_resp_bits_laddr_data[11:0]; // @[Scratchpad.scala:387:43]
// Lane-wise assembly of the 128-bit DMA write word (bytes 0..15, LSB first).
wire [7:0] _bank_ios_0_write_data_T; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_0_write_data_T_1; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_0_write_data_lo_lo_lo = {_bank_ios_0_write_data_T_1, _bank_ios_0_write_data_T}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_0_write_data_T_2; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_0_write_data_T_3; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_0_write_data_lo_lo_hi = {_bank_ios_0_write_data_T_3, _bank_ios_0_write_data_T_2}; // @[Scratchpad.scala:540:72]
wire [31:0] bank_ios_0_write_data_lo_lo = {bank_ios_0_write_data_lo_lo_hi, bank_ios_0_write_data_lo_lo_lo}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_0_write_data_T_4; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_0_write_data_T_5; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_0_write_data_lo_hi_lo = {_bank_ios_0_write_data_T_5, _bank_ios_0_write_data_T_4}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_0_write_data_T_6; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_0_write_data_T_7; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_0_write_data_lo_hi_hi = {_bank_ios_0_write_data_T_7, _bank_ios_0_write_data_T_6}; // @[Scratchpad.scala:540:72]
wire [31:0] bank_ios_0_write_data_lo_hi = {bank_ios_0_write_data_lo_hi_hi, bank_ios_0_write_data_lo_hi_lo}; // @[Scratchpad.scala:540:72]
wire [63:0] bank_ios_0_write_data_lo = {bank_ios_0_write_data_lo_hi, bank_ios_0_write_data_lo_lo}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_0_write_data_T_8; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_0_write_data_T_9; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_0_write_data_hi_lo_lo = {_bank_ios_0_write_data_T_9, _bank_ios_0_write_data_T_8}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_0_write_data_T_10; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_0_write_data_T_11; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_0_write_data_hi_lo_hi = {_bank_ios_0_write_data_T_11, _bank_ios_0_write_data_T_10}; // @[Scratchpad.scala:540:72]
wire [31:0] bank_ios_0_write_data_hi_lo = {bank_ios_0_write_data_hi_lo_hi, bank_ios_0_write_data_hi_lo_lo}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_0_write_data_T_12; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_0_write_data_T_13; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_0_write_data_hi_hi_lo = {_bank_ios_0_write_data_T_13, _bank_ios_0_write_data_T_12}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_0_write_data_T_14; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_0_write_data_T_15; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_0_write_data_hi_hi_hi = {_bank_ios_0_write_data_T_15, _bank_ios_0_write_data_T_14}; // @[Scratchpad.scala:540:72]
wire [31:0] bank_ios_0_write_data_hi_hi = {bank_ios_0_write_data_hi_hi_hi, bank_ios_0_write_data_hi_hi_lo}; // @[Scratchpad.scala:540:72]
wire [63:0] bank_ios_0_write_data_hi = {bank_ios_0_write_data_hi_hi, bank_ios_0_write_data_hi_lo}; // @[Scratchpad.scala:540:72]
wire [127:0] _bank_ios_0_write_data_T_16 = {bank_ios_0_write_data_hi, bank_ios_0_write_data_lo}; // @[Scratchpad.scala:540:72]
// Per-bank copies of the zero-writer row address (laddr data bits [11:0]).
wire [11:0] _bank_ios_0_write_addr_T_1 = _zero_writer_pixel_repeater_io_resp_bits_laddr_data[11:0]; // @[Scratchpad.scala:330:44]
wire [11:0] _bank_ios_1_write_addr_T_1 = _zero_writer_pixel_repeater_io_resp_bits_laddr_data[11:0]; // @[Scratchpad.scala:330:44]
wire [11:0] _bank_ios_2_write_addr_T_1 = _zero_writer_pixel_repeater_io_resp_bits_laddr_data[11:0]; // @[Scratchpad.scala:330:44]
wire [11:0] _bank_ios_3_write_addr_T_1 = _zero_writer_pixel_repeater_io_resp_bits_laddr_data[11:0]; // @[Scratchpad.scala:330:44]
// Final mux: ex write > DMA mvin > zero-writer, for addr, data, and all
// sixteen mask lanes.  The zero-writer path writes data 128'h0.
assign bank_ios_0_write_addr = io_srams_write_0_en_0 ? io_srams_write_0_addr_0 : dmaread ? _bank_ios_0_write_addr_T : _bank_ios_0_write_addr_T_1; // @[Scratchpad.scala:205:9, :452:29, :520:117, :534:24, :535:26, :538:30, :539:26, :544:32]
assign bank_ios_0_write_data = io_srams_write_0_en_0 ? io_srams_write_0_data_0 : dmaread ? _bank_ios_0_write_data_T_16 : 128'h0; // @[Scratchpad.scala:205:9, :452:29, :520:117, :534:24, :536:26, :538:30, :540:{26,72}, :544:32]
assign bank_ios_0_write_mask_0 = io_srams_write_0_en_0 ? io_srams_write_0_mask_0_0 : dmaread ? _mvin_scale_pixel_repeater_io_resp_bits_mask_0 : _zero_writer_pixel_repeater_io_resp_bits_mask_0; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_0_write_mask_1 = io_srams_write_0_en_0 ? io_srams_write_0_mask_1_0 : dmaread ? _mvin_scale_pixel_repeater_io_resp_bits_mask_1 : _zero_writer_pixel_repeater_io_resp_bits_mask_1; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_0_write_mask_2 = io_srams_write_0_en_0 ? io_srams_write_0_mask_2_0 : dmaread ? _mvin_scale_pixel_repeater_io_resp_bits_mask_2 : _zero_writer_pixel_repeater_io_resp_bits_mask_2; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_0_write_mask_3 = io_srams_write_0_en_0 ? io_srams_write_0_mask_3_0 : dmaread ? _mvin_scale_pixel_repeater_io_resp_bits_mask_3 : _zero_writer_pixel_repeater_io_resp_bits_mask_3; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_0_write_mask_4 = io_srams_write_0_en_0 ? io_srams_write_0_mask_4_0 : dmaread ? _mvin_scale_pixel_repeater_io_resp_bits_mask_4 : _zero_writer_pixel_repeater_io_resp_bits_mask_4; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_0_write_mask_5 = io_srams_write_0_en_0 ? io_srams_write_0_mask_5_0 : dmaread ? _mvin_scale_pixel_repeater_io_resp_bits_mask_5 : _zero_writer_pixel_repeater_io_resp_bits_mask_5; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_0_write_mask_6 = io_srams_write_0_en_0 ? io_srams_write_0_mask_6_0 : dmaread ? _mvin_scale_pixel_repeater_io_resp_bits_mask_6 : _zero_writer_pixel_repeater_io_resp_bits_mask_6; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_0_write_mask_7 = io_srams_write_0_en_0 ? io_srams_write_0_mask_7_0 : dmaread ? _mvin_scale_pixel_repeater_io_resp_bits_mask_7 : _zero_writer_pixel_repeater_io_resp_bits_mask_7; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_0_write_mask_8 = io_srams_write_0_en_0 ? io_srams_write_0_mask_8_0 : dmaread ? _mvin_scale_pixel_repeater_io_resp_bits_mask_8 : _zero_writer_pixel_repeater_io_resp_bits_mask_8; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_0_write_mask_9 = io_srams_write_0_en_0 ? io_srams_write_0_mask_9_0 : dmaread ? _mvin_scale_pixel_repeater_io_resp_bits_mask_9 : _zero_writer_pixel_repeater_io_resp_bits_mask_9; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_0_write_mask_10 = io_srams_write_0_en_0 ? io_srams_write_0_mask_10_0 : dmaread ? _mvin_scale_pixel_repeater_io_resp_bits_mask_10 : _zero_writer_pixel_repeater_io_resp_bits_mask_10; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_0_write_mask_11 = io_srams_write_0_en_0 ? io_srams_write_0_mask_11_0 : dmaread ? _mvin_scale_pixel_repeater_io_resp_bits_mask_11 : _zero_writer_pixel_repeater_io_resp_bits_mask_11; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_0_write_mask_12 = io_srams_write_0_en_0 ? io_srams_write_0_mask_12_0 : dmaread ? _mvin_scale_pixel_repeater_io_resp_bits_mask_12 : _zero_writer_pixel_repeater_io_resp_bits_mask_12; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_0_write_mask_13 = io_srams_write_0_en_0 ? io_srams_write_0_mask_13_0 : dmaread ? _mvin_scale_pixel_repeater_io_resp_bits_mask_13 : _zero_writer_pixel_repeater_io_resp_bits_mask_13; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_0_write_mask_14 = io_srams_write_0_en_0 ? io_srams_write_0_mask_14_0 : dmaread ? _mvin_scale_pixel_repeater_io_resp_bits_mask_14 : _zero_writer_pixel_repeater_io_resp_bits_mask_14; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_0_write_mask_15 = io_srams_write_0_en_0 ? io_srams_write_0_mask_15_0 : dmaread ? _mvin_scale_pixel_repeater_io_resp_bits_mask_15 : _zero_writer_pixel_repeater_io_resp_bits_mask_15; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
// --------------------------------------------------------------------------
// SRAM bank 1 write-port mux — identical structure to bank 0 above, with the
// bank-select compares against 2'h1.  Priority: ex write (io_srams_write_1)
// > DMA mvin beat (dmaread_1) > zero-writer beat (zerowrite_1, data 128'h0).
// --------------------------------------------------------------------------
wire _dmaread_T_4 = ~_mvin_scale_pixel_repeater_io_resp_bits_tag_is_acc; // @[Scratchpad.scala:387:43, :520:66]
wire _dmaread_T_5 = _mvin_scale_pixel_repeater_io_resp_valid & _dmaread_T_4; // @[Scratchpad.scala:387:43, :520:{63,66}]
wire _dmaread_T_7 = _dmaread_T_6 == 2'h1; // @[Scratchpad.scala:521:27]
wire dmaread_1 = _dmaread_T_5 & _dmaread_T_7; // @[Scratchpad.scala:520:{63,117}, :521:27]
wire _zerowrite_T_9 = ~_zero_writer_pixel_repeater_io_resp_bits_laddr_is_acc_addr; // @[Scratchpad.scala:330:44, :527:69]
wire _zerowrite_T_10 = _zero_writer_pixel_repeater_io_resp_valid & _zerowrite_T_9; // @[Scratchpad.scala:330:44, :527:{66,69}]
wire _zerowrite_T_12 = _zerowrite_T_11 == 2'h1; // @[Scratchpad.scala:528:67]
wire _zerowrite_T_13 = _zerowrite_T_10 & _zerowrite_T_12; // @[Scratchpad.scala:527:{66,128}, :528:67]
// Suppress the zero-write while either source is delivering a last beat
// (_zerowrite_T_14/_15 are fanned-out copies of _GEN_6/_GEN_7).
wire _zerowrite_T_16 = _zerowrite_T_14 | _zerowrite_T_15; // @[Scratchpad.scala:530:{54,102,131}]
wire _zerowrite_T_17 = ~_zerowrite_T_16; // @[Scratchpad.scala:530:{11,102}]
wire zerowrite_1 = _zerowrite_T_13 & _zerowrite_T_17; // @[Scratchpad.scala:527:128, :528:75, :530:11]
wire _bank_ios_1_write_en_T = io_srams_write_1_en_0 | dmaread_1; // @[Scratchpad.scala:205:9, :520:117, :532:33]
assign _bank_ios_1_write_en_T_1 = _bank_ios_1_write_en_T | zerowrite_1; // @[Scratchpad.scala:528:75, :532:{33,44}]
assign bank_ios_1_write_en = _bank_ios_1_write_en_T_1; // @[Scratchpad.scala:452:29, :532:44]
// 128-bit DMA write word for bank 1, assembled byte-by-byte (LSB first).
wire [7:0] _bank_ios_1_write_data_T; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_1_write_data_T_1; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_1_write_data_lo_lo_lo = {_bank_ios_1_write_data_T_1, _bank_ios_1_write_data_T}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_1_write_data_T_2; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_1_write_data_T_3; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_1_write_data_lo_lo_hi = {_bank_ios_1_write_data_T_3, _bank_ios_1_write_data_T_2}; // @[Scratchpad.scala:540:72]
wire [31:0] bank_ios_1_write_data_lo_lo = {bank_ios_1_write_data_lo_lo_hi, bank_ios_1_write_data_lo_lo_lo}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_1_write_data_T_4; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_1_write_data_T_5; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_1_write_data_lo_hi_lo = {_bank_ios_1_write_data_T_5, _bank_ios_1_write_data_T_4}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_1_write_data_T_6; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_1_write_data_T_7; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_1_write_data_lo_hi_hi = {_bank_ios_1_write_data_T_7, _bank_ios_1_write_data_T_6}; // @[Scratchpad.scala:540:72]
wire [31:0] bank_ios_1_write_data_lo_hi = {bank_ios_1_write_data_lo_hi_hi, bank_ios_1_write_data_lo_hi_lo}; // @[Scratchpad.scala:540:72]
wire [63:0] bank_ios_1_write_data_lo = {bank_ios_1_write_data_lo_hi, bank_ios_1_write_data_lo_lo}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_1_write_data_T_8; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_1_write_data_T_9; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_1_write_data_hi_lo_lo = {_bank_ios_1_write_data_T_9, _bank_ios_1_write_data_T_8}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_1_write_data_T_10; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_1_write_data_T_11; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_1_write_data_hi_lo_hi = {_bank_ios_1_write_data_T_11, _bank_ios_1_write_data_T_10}; // @[Scratchpad.scala:540:72]
wire [31:0] bank_ios_1_write_data_hi_lo = {bank_ios_1_write_data_hi_lo_hi, bank_ios_1_write_data_hi_lo_lo}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_1_write_data_T_12; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_1_write_data_T_13; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_1_write_data_hi_hi_lo = {_bank_ios_1_write_data_T_13, _bank_ios_1_write_data_T_12}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_1_write_data_T_14; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_1_write_data_T_15; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_1_write_data_hi_hi_hi = {_bank_ios_1_write_data_T_15, _bank_ios_1_write_data_T_14}; // @[Scratchpad.scala:540:72]
wire [31:0] bank_ios_1_write_data_hi_hi = {bank_ios_1_write_data_hi_hi_hi, bank_ios_1_write_data_hi_hi_lo}; // @[Scratchpad.scala:540:72]
wire [63:0] bank_ios_1_write_data_hi = {bank_ios_1_write_data_hi_hi, bank_ios_1_write_data_hi_lo}; // @[Scratchpad.scala:540:72]
wire [127:0] _bank_ios_1_write_data_T_16 = {bank_ios_1_write_data_hi, bank_ios_1_write_data_lo}; // @[Scratchpad.scala:540:72]
// Final mux for bank 1: ex write > DMA mvin > zero-writer.
assign bank_ios_1_write_addr = io_srams_write_1_en_0 ? io_srams_write_1_addr_0 : dmaread_1 ? _bank_ios_1_write_addr_T : _bank_ios_1_write_addr_T_1; // @[Scratchpad.scala:205:9, :452:29, :520:117, :534:24, :535:26, :538:30, :539:26, :544:32]
assign bank_ios_1_write_data = io_srams_write_1_en_0 ? io_srams_write_1_data_0 : dmaread_1 ? _bank_ios_1_write_data_T_16 : 128'h0; // @[Scratchpad.scala:205:9, :452:29, :520:117, :534:24, :536:26, :538:30, :540:{26,72}, :544:32]
assign bank_ios_1_write_mask_0 = io_srams_write_1_en_0 ? io_srams_write_1_mask_0_0 : dmaread_1 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_0 : _zero_writer_pixel_repeater_io_resp_bits_mask_0; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_1_write_mask_1 = io_srams_write_1_en_0 ? io_srams_write_1_mask_1_0 : dmaread_1 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_1 : _zero_writer_pixel_repeater_io_resp_bits_mask_1; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_1_write_mask_2 = io_srams_write_1_en_0 ? io_srams_write_1_mask_2_0 : dmaread_1 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_2 : _zero_writer_pixel_repeater_io_resp_bits_mask_2; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_1_write_mask_3 = io_srams_write_1_en_0 ? io_srams_write_1_mask_3_0 : dmaread_1 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_3 : _zero_writer_pixel_repeater_io_resp_bits_mask_3; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_1_write_mask_4 = io_srams_write_1_en_0 ? io_srams_write_1_mask_4_0 : dmaread_1 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_4 : _zero_writer_pixel_repeater_io_resp_bits_mask_4; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_1_write_mask_5 = io_srams_write_1_en_0 ? io_srams_write_1_mask_5_0 : dmaread_1 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_5 : _zero_writer_pixel_repeater_io_resp_bits_mask_5; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_1_write_mask_6 = io_srams_write_1_en_0 ? io_srams_write_1_mask_6_0 : dmaread_1 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_6 : _zero_writer_pixel_repeater_io_resp_bits_mask_6; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_1_write_mask_7 = io_srams_write_1_en_0 ? io_srams_write_1_mask_7_0 : dmaread_1 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_7 : _zero_writer_pixel_repeater_io_resp_bits_mask_7; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_1_write_mask_8 = io_srams_write_1_en_0 ? io_srams_write_1_mask_8_0 : dmaread_1 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_8 : _zero_writer_pixel_repeater_io_resp_bits_mask_8; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_1_write_mask_9 = io_srams_write_1_en_0 ? io_srams_write_1_mask_9_0 : dmaread_1 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_9 : _zero_writer_pixel_repeater_io_resp_bits_mask_9; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_1_write_mask_10 = io_srams_write_1_en_0 ? io_srams_write_1_mask_10_0 : dmaread_1 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_10 : _zero_writer_pixel_repeater_io_resp_bits_mask_10; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_1_write_mask_11 = io_srams_write_1_en_0 ? io_srams_write_1_mask_11_0 : dmaread_1 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_11 : _zero_writer_pixel_repeater_io_resp_bits_mask_11; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_1_write_mask_12 = io_srams_write_1_en_0 ? io_srams_write_1_mask_12_0 : dmaread_1 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_12 : _zero_writer_pixel_repeater_io_resp_bits_mask_12; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_1_write_mask_13 = io_srams_write_1_en_0 ? io_srams_write_1_mask_13_0 : dmaread_1 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_13 : _zero_writer_pixel_repeater_io_resp_bits_mask_13; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_1_write_mask_14 = io_srams_write_1_en_0 ? io_srams_write_1_mask_14_0 : dmaread_1 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_14 : _zero_writer_pixel_repeater_io_resp_bits_mask_14; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_1_write_mask_15 = io_srams_write_1_en_0 ? io_srams_write_1_mask_15_0 : dmaread_1 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_15 : _zero_writer_pixel_repeater_io_resp_bits_mask_15; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
// --- Bank 2: qualify a write coming from the DMA mvin (pixel-repeater) response path.
// The response must be valid, must NOT target the accumulator (tag_is_acc low), and the
// 2-bit select must equal 2 — presumably the scratchpad bank id; confirm at Scratchpad.scala:521.
wire _dmaread_T_8 = ~_mvin_scale_pixel_repeater_io_resp_bits_tag_is_acc; // @[Scratchpad.scala:387:43, :520:66]
wire _dmaread_T_9 = _mvin_scale_pixel_repeater_io_resp_valid & _dmaread_T_8; // @[Scratchpad.scala:387:43, :520:{63,66}]
wire _dmaread_T_11 = _dmaread_T_10 == 2'h2; // @[Scratchpad.scala:521:27]
wire dmaread_2 = _dmaread_T_9 & _dmaread_T_11; // @[Scratchpad.scala:520:{63,117}, :521:27]
// --- Bank 2: qualify a write from the zero-writer (memset) response path, then form the
// bank write enable. zerowrite_2 requires: response valid, local address is a scratchpad
// (not accumulator) address, the 2-bit select equals 2, and the _zerowrite_T_23|T_24
// condition is false (an exclusion term computed earlier — see Scratchpad.scala:530).
wire _zerowrite_T_18 = ~_zero_writer_pixel_repeater_io_resp_bits_laddr_is_acc_addr; // @[Scratchpad.scala:330:44, :527:69]
wire _zerowrite_T_19 = _zero_writer_pixel_repeater_io_resp_valid & _zerowrite_T_18; // @[Scratchpad.scala:330:44, :527:{66,69}]
wire _zerowrite_T_21 = _zerowrite_T_20 == 2'h2; // @[Scratchpad.scala:528:67]
wire _zerowrite_T_22 = _zerowrite_T_19 & _zerowrite_T_21; // @[Scratchpad.scala:527:{66,128}, :528:67]
wire _zerowrite_T_25 = _zerowrite_T_23 | _zerowrite_T_24; // @[Scratchpad.scala:530:{54,102,131}]
wire _zerowrite_T_26 = ~_zerowrite_T_25; // @[Scratchpad.scala:530:{11,102}]
wire zerowrite_2 = _zerowrite_T_22 & _zerowrite_T_26; // @[Scratchpad.scala:527:128, :528:75, :530:11]
// Write enable is the OR of the three producers (priority among them is resolved by the
// addr/data/mask muxes below: execute-unit write > DMA read > zero write).
wire _bank_ios_2_write_en_T = io_srams_write_2_en_0 | dmaread_2; // @[Scratchpad.scala:205:9, :520:117, :532:33]
assign _bank_ios_2_write_en_T_1 = _bank_ios_2_write_en_T | zerowrite_2; // @[Scratchpad.scala:528:75, :532:{33,44}]
assign bank_ios_2_write_en = _bank_ios_2_write_en_T_1; // @[Scratchpad.scala:452:29, :532:44]
wire [7:0] _bank_ios_2_write_data_T; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_2_write_data_T_1; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_2_write_data_lo_lo_lo = {_bank_ios_2_write_data_T_1, _bank_ios_2_write_data_T}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_2_write_data_T_2; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_2_write_data_T_3; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_2_write_data_lo_lo_hi = {_bank_ios_2_write_data_T_3, _bank_ios_2_write_data_T_2}; // @[Scratchpad.scala:540:72]
wire [31:0] bank_ios_2_write_data_lo_lo = {bank_ios_2_write_data_lo_lo_hi, bank_ios_2_write_data_lo_lo_lo}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_2_write_data_T_4; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_2_write_data_T_5; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_2_write_data_lo_hi_lo = {_bank_ios_2_write_data_T_5, _bank_ios_2_write_data_T_4}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_2_write_data_T_6; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_2_write_data_T_7; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_2_write_data_lo_hi_hi = {_bank_ios_2_write_data_T_7, _bank_ios_2_write_data_T_6}; // @[Scratchpad.scala:540:72]
wire [31:0] bank_ios_2_write_data_lo_hi = {bank_ios_2_write_data_lo_hi_hi, bank_ios_2_write_data_lo_hi_lo}; // @[Scratchpad.scala:540:72]
wire [63:0] bank_ios_2_write_data_lo = {bank_ios_2_write_data_lo_hi, bank_ios_2_write_data_lo_lo}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_2_write_data_T_8; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_2_write_data_T_9; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_2_write_data_hi_lo_lo = {_bank_ios_2_write_data_T_9, _bank_ios_2_write_data_T_8}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_2_write_data_T_10; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_2_write_data_T_11; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_2_write_data_hi_lo_hi = {_bank_ios_2_write_data_T_11, _bank_ios_2_write_data_T_10}; // @[Scratchpad.scala:540:72]
wire [31:0] bank_ios_2_write_data_hi_lo = {bank_ios_2_write_data_hi_lo_hi, bank_ios_2_write_data_hi_lo_lo}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_2_write_data_T_12; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_2_write_data_T_13; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_2_write_data_hi_hi_lo = {_bank_ios_2_write_data_T_13, _bank_ios_2_write_data_T_12}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_2_write_data_T_14; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_2_write_data_T_15; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_2_write_data_hi_hi_hi = {_bank_ios_2_write_data_T_15, _bank_ios_2_write_data_T_14}; // @[Scratchpad.scala:540:72]
wire [31:0] bank_ios_2_write_data_hi_hi = {bank_ios_2_write_data_hi_hi_hi, bank_ios_2_write_data_hi_hi_lo}; // @[Scratchpad.scala:540:72]
wire [63:0] bank_ios_2_write_data_hi = {bank_ios_2_write_data_hi_hi, bank_ios_2_write_data_hi_lo}; // @[Scratchpad.scala:540:72]
wire [127:0] _bank_ios_2_write_data_T_16 = {bank_ios_2_write_data_hi, bank_ios_2_write_data_lo}; // @[Scratchpad.scala:540:72]
assign bank_ios_2_write_addr = io_srams_write_2_en_0 ? io_srams_write_2_addr_0 : dmaread_2 ? _bank_ios_2_write_addr_T : _bank_ios_2_write_addr_T_1; // @[Scratchpad.scala:205:9, :452:29, :520:117, :534:24, :535:26, :538:30, :539:26, :544:32]
assign bank_ios_2_write_data = io_srams_write_2_en_0 ? io_srams_write_2_data_0 : dmaread_2 ? _bank_ios_2_write_data_T_16 : 128'h0; // @[Scratchpad.scala:205:9, :452:29, :520:117, :534:24, :536:26, :538:30, :540:{26,72}, :544:32]
assign bank_ios_2_write_mask_0 = io_srams_write_2_en_0 ? io_srams_write_2_mask_0_0 : dmaread_2 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_0 : _zero_writer_pixel_repeater_io_resp_bits_mask_0; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_2_write_mask_1 = io_srams_write_2_en_0 ? io_srams_write_2_mask_1_0 : dmaread_2 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_1 : _zero_writer_pixel_repeater_io_resp_bits_mask_1; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_2_write_mask_2 = io_srams_write_2_en_0 ? io_srams_write_2_mask_2_0 : dmaread_2 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_2 : _zero_writer_pixel_repeater_io_resp_bits_mask_2; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_2_write_mask_3 = io_srams_write_2_en_0 ? io_srams_write_2_mask_3_0 : dmaread_2 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_3 : _zero_writer_pixel_repeater_io_resp_bits_mask_3; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_2_write_mask_4 = io_srams_write_2_en_0 ? io_srams_write_2_mask_4_0 : dmaread_2 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_4 : _zero_writer_pixel_repeater_io_resp_bits_mask_4; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_2_write_mask_5 = io_srams_write_2_en_0 ? io_srams_write_2_mask_5_0 : dmaread_2 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_5 : _zero_writer_pixel_repeater_io_resp_bits_mask_5; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_2_write_mask_6 = io_srams_write_2_en_0 ? io_srams_write_2_mask_6_0 : dmaread_2 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_6 : _zero_writer_pixel_repeater_io_resp_bits_mask_6; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_2_write_mask_7 = io_srams_write_2_en_0 ? io_srams_write_2_mask_7_0 : dmaread_2 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_7 : _zero_writer_pixel_repeater_io_resp_bits_mask_7; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_2_write_mask_8 = io_srams_write_2_en_0 ? io_srams_write_2_mask_8_0 : dmaread_2 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_8 : _zero_writer_pixel_repeater_io_resp_bits_mask_8; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_2_write_mask_9 = io_srams_write_2_en_0 ? io_srams_write_2_mask_9_0 : dmaread_2 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_9 : _zero_writer_pixel_repeater_io_resp_bits_mask_9; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_2_write_mask_10 = io_srams_write_2_en_0 ? io_srams_write_2_mask_10_0 : dmaread_2 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_10 : _zero_writer_pixel_repeater_io_resp_bits_mask_10; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_2_write_mask_11 = io_srams_write_2_en_0 ? io_srams_write_2_mask_11_0 : dmaread_2 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_11 : _zero_writer_pixel_repeater_io_resp_bits_mask_11; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_2_write_mask_12 = io_srams_write_2_en_0 ? io_srams_write_2_mask_12_0 : dmaread_2 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_12 : _zero_writer_pixel_repeater_io_resp_bits_mask_12; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_2_write_mask_13 = io_srams_write_2_en_0 ? io_srams_write_2_mask_13_0 : dmaread_2 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_13 : _zero_writer_pixel_repeater_io_resp_bits_mask_13; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_2_write_mask_14 = io_srams_write_2_en_0 ? io_srams_write_2_mask_14_0 : dmaread_2 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_14 : _zero_writer_pixel_repeater_io_resp_bits_mask_14; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_2_write_mask_15 = io_srams_write_2_en_0 ? io_srams_write_2_mask_15_0 : dmaread_2 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_15 : _zero_writer_pixel_repeater_io_resp_bits_mask_15; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
// --- Bank 3: same write-qualification structure as bank 2 above. The bank-select compare
// here is a reduction-AND (&x), i.e. the 2-bit select equals 2'b11 == 3.
wire _dmaread_T_12 = ~_mvin_scale_pixel_repeater_io_resp_bits_tag_is_acc; // @[Scratchpad.scala:387:43, :520:66]
wire _dmaread_T_13 = _mvin_scale_pixel_repeater_io_resp_valid & _dmaread_T_12; // @[Scratchpad.scala:387:43, :520:{63,66}]
wire _dmaread_T_15 = &_dmaread_T_14; // @[Scratchpad.scala:521:27]
wire dmaread_3 = _dmaread_T_13 & _dmaread_T_15; // @[Scratchpad.scala:520:{63,117}, :521:27]
wire _zerowrite_T_27 = ~_zero_writer_pixel_repeater_io_resp_bits_laddr_is_acc_addr; // @[Scratchpad.scala:330:44, :527:69]
wire _zerowrite_T_28 = _zero_writer_pixel_repeater_io_resp_valid & _zerowrite_T_27; // @[Scratchpad.scala:330:44, :527:{66,69}]
wire _zerowrite_T_30 = &_zerowrite_T_29; // @[Scratchpad.scala:528:67]
wire _zerowrite_T_31 = _zerowrite_T_28 & _zerowrite_T_30; // @[Scratchpad.scala:527:{66,128}, :528:67]
wire _zerowrite_T_34 = _zerowrite_T_32 | _zerowrite_T_33; // @[Scratchpad.scala:530:{54,102,131}]
wire _zerowrite_T_35 = ~_zerowrite_T_34; // @[Scratchpad.scala:530:{11,102}]
wire zerowrite_3 = _zerowrite_T_31 & _zerowrite_T_35; // @[Scratchpad.scala:527:128, :528:75, :530:11]
// Bank-3 write enable: OR of execute-unit write, DMA-read write, and zero write.
wire _bank_ios_3_write_en_T = io_srams_write_3_en_0 | dmaread_3; // @[Scratchpad.scala:205:9, :520:117, :532:33]
assign _bank_ios_3_write_en_T_1 = _bank_ios_3_write_en_T | zerowrite_3; // @[Scratchpad.scala:528:75, :532:{33,44}]
assign bank_ios_3_write_en = _bank_ios_3_write_en_T_1; // @[Scratchpad.scala:452:29, :532:44]
wire [7:0] _bank_ios_3_write_data_T; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_3_write_data_T_1; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_3_write_data_lo_lo_lo = {_bank_ios_3_write_data_T_1, _bank_ios_3_write_data_T}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_3_write_data_T_2; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_3_write_data_T_3; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_3_write_data_lo_lo_hi = {_bank_ios_3_write_data_T_3, _bank_ios_3_write_data_T_2}; // @[Scratchpad.scala:540:72]
wire [31:0] bank_ios_3_write_data_lo_lo = {bank_ios_3_write_data_lo_lo_hi, bank_ios_3_write_data_lo_lo_lo}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_3_write_data_T_4; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_3_write_data_T_5; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_3_write_data_lo_hi_lo = {_bank_ios_3_write_data_T_5, _bank_ios_3_write_data_T_4}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_3_write_data_T_6; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_3_write_data_T_7; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_3_write_data_lo_hi_hi = {_bank_ios_3_write_data_T_7, _bank_ios_3_write_data_T_6}; // @[Scratchpad.scala:540:72]
wire [31:0] bank_ios_3_write_data_lo_hi = {bank_ios_3_write_data_lo_hi_hi, bank_ios_3_write_data_lo_hi_lo}; // @[Scratchpad.scala:540:72]
wire [63:0] bank_ios_3_write_data_lo = {bank_ios_3_write_data_lo_hi, bank_ios_3_write_data_lo_lo}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_3_write_data_T_8; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_3_write_data_T_9; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_3_write_data_hi_lo_lo = {_bank_ios_3_write_data_T_9, _bank_ios_3_write_data_T_8}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_3_write_data_T_10; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_3_write_data_T_11; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_3_write_data_hi_lo_hi = {_bank_ios_3_write_data_T_11, _bank_ios_3_write_data_T_10}; // @[Scratchpad.scala:540:72]
wire [31:0] bank_ios_3_write_data_hi_lo = {bank_ios_3_write_data_hi_lo_hi, bank_ios_3_write_data_hi_lo_lo}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_3_write_data_T_12; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_3_write_data_T_13; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_3_write_data_hi_hi_lo = {_bank_ios_3_write_data_T_13, _bank_ios_3_write_data_T_12}; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_3_write_data_T_14; // @[Scratchpad.scala:540:72]
wire [7:0] _bank_ios_3_write_data_T_15; // @[Scratchpad.scala:540:72]
wire [15:0] bank_ios_3_write_data_hi_hi_hi = {_bank_ios_3_write_data_T_15, _bank_ios_3_write_data_T_14}; // @[Scratchpad.scala:540:72]
wire [31:0] bank_ios_3_write_data_hi_hi = {bank_ios_3_write_data_hi_hi_hi, bank_ios_3_write_data_hi_hi_lo}; // @[Scratchpad.scala:540:72]
wire [63:0] bank_ios_3_write_data_hi = {bank_ios_3_write_data_hi_hi, bank_ios_3_write_data_hi_lo}; // @[Scratchpad.scala:540:72]
wire [127:0] _bank_ios_3_write_data_T_16 = {bank_ios_3_write_data_hi, bank_ios_3_write_data_lo}; // @[Scratchpad.scala:540:72]
assign bank_ios_3_write_addr = io_srams_write_3_en_0 ? io_srams_write_3_addr_0 : dmaread_3 ? _bank_ios_3_write_addr_T : _bank_ios_3_write_addr_T_1; // @[Scratchpad.scala:205:9, :452:29, :520:117, :534:24, :535:26, :538:30, :539:26, :544:32]
assign bank_ios_3_write_data = io_srams_write_3_en_0 ? io_srams_write_3_data_0 : dmaread_3 ? _bank_ios_3_write_data_T_16 : 128'h0; // @[Scratchpad.scala:205:9, :452:29, :520:117, :534:24, :536:26, :538:30, :540:{26,72}, :544:32]
assign bank_ios_3_write_mask_0 = io_srams_write_3_en_0 ? io_srams_write_3_mask_0_0 : dmaread_3 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_0 : _zero_writer_pixel_repeater_io_resp_bits_mask_0; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_3_write_mask_1 = io_srams_write_3_en_0 ? io_srams_write_3_mask_1_0 : dmaread_3 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_1 : _zero_writer_pixel_repeater_io_resp_bits_mask_1; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_3_write_mask_2 = io_srams_write_3_en_0 ? io_srams_write_3_mask_2_0 : dmaread_3 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_2 : _zero_writer_pixel_repeater_io_resp_bits_mask_2; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_3_write_mask_3 = io_srams_write_3_en_0 ? io_srams_write_3_mask_3_0 : dmaread_3 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_3 : _zero_writer_pixel_repeater_io_resp_bits_mask_3; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_3_write_mask_4 = io_srams_write_3_en_0 ? io_srams_write_3_mask_4_0 : dmaread_3 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_4 : _zero_writer_pixel_repeater_io_resp_bits_mask_4; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_3_write_mask_5 = io_srams_write_3_en_0 ? io_srams_write_3_mask_5_0 : dmaread_3 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_5 : _zero_writer_pixel_repeater_io_resp_bits_mask_5; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_3_write_mask_6 = io_srams_write_3_en_0 ? io_srams_write_3_mask_6_0 : dmaread_3 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_6 : _zero_writer_pixel_repeater_io_resp_bits_mask_6; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_3_write_mask_7 = io_srams_write_3_en_0 ? io_srams_write_3_mask_7_0 : dmaread_3 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_7 : _zero_writer_pixel_repeater_io_resp_bits_mask_7; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_3_write_mask_8 = io_srams_write_3_en_0 ? io_srams_write_3_mask_8_0 : dmaread_3 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_8 : _zero_writer_pixel_repeater_io_resp_bits_mask_8; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_3_write_mask_9 = io_srams_write_3_en_0 ? io_srams_write_3_mask_9_0 : dmaread_3 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_9 : _zero_writer_pixel_repeater_io_resp_bits_mask_9; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_3_write_mask_10 = io_srams_write_3_en_0 ? io_srams_write_3_mask_10_0 : dmaread_3 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_10 : _zero_writer_pixel_repeater_io_resp_bits_mask_10; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_3_write_mask_11 = io_srams_write_3_en_0 ? io_srams_write_3_mask_11_0 : dmaread_3 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_11 : _zero_writer_pixel_repeater_io_resp_bits_mask_11; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_3_write_mask_12 = io_srams_write_3_en_0 ? io_srams_write_3_mask_12_0 : dmaread_3 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_12 : _zero_writer_pixel_repeater_io_resp_bits_mask_12; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_3_write_mask_13 = io_srams_write_3_en_0 ? io_srams_write_3_mask_13_0 : dmaread_3 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_13 : _zero_writer_pixel_repeater_io_resp_bits_mask_13; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_3_write_mask_14 = io_srams_write_3_en_0 ? io_srams_write_3_mask_14_0 : dmaread_3 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_14 : _zero_writer_pixel_repeater_io_resp_bits_mask_14; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
assign bank_ios_3_write_mask_15 = io_srams_write_3_en_0 ? io_srams_write_3_mask_15_0 : dmaread_3 ? _mvin_scale_pixel_repeater_io_resp_bits_mask_15 : _zero_writer_pixel_repeater_io_resp_bits_mask_15; // @[Scratchpad.scala:205:9, :330:44, :387:43, :452:29, :520:117, :534:24, :537:26, :538:30, :541:26, :544:32]
wire _acc_norm_unit_out_ready_T; // @[Scratchpad.scala:596:59]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_data_0_0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_data_1_0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_data_2_0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_data_3_0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_data_4_0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_data_5_0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_data_6_0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_data_7_0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_data_8_0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_data_9_0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_data_10_0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_data_11_0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_data_12_0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_data_13_0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_data_14_0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_data_15_0; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_scale_bits; // @[Normalizer.scala:809:38]
wire acc_norm_unit_out_bits_acc_read_resp_fromDMA; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_igelu_qb; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_igelu_qc; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_iexp_qln2; // @[Normalizer.scala:809:38]
wire [31:0] acc_norm_unit_out_bits_acc_read_resp_iexp_qln2_inv; // @[Normalizer.scala:809:38]
wire [2:0] acc_norm_unit_out_bits_acc_read_resp_act; // @[Normalizer.scala:809:38]
wire [1:0] acc_norm_unit_out_bits_acc_read_resp_acc_bank_id; // @[Normalizer.scala:809:38]
wire acc_norm_unit_out_ready; // @[Normalizer.scala:809:38]
wire acc_norm_unit_out_valid; // @[Normalizer.scala:809:38]
// --- Handshake between the write-scale queue and the accumulator scale unit.
// acc_waiting_to_be_scaled: the write_scale_q head is valid, is an accumulator address,
// is NOT the special "garbage" local address (the T_1..T_5 chain from LocalAddr.scala:43-44
// checks read_full_acc_row, all-ones data bits, and the garbage bit; it is negated at T_6),
// and the downstream write_issue_q can accept the result.
wire _acc_waiting_to_be_scaled_T_1 = _acc_waiting_to_be_scaled_T & _write_scale_q_io_deq_bits_laddr_read_full_acc_row; // @[Scratchpad.scala:253:31]
wire _acc_waiting_to_be_scaled_T_2 = &_write_scale_q_io_deq_bits_laddr_data; // @[Scratchpad.scala:253:31]
wire _acc_waiting_to_be_scaled_T_3 = _acc_waiting_to_be_scaled_T_1 & _acc_waiting_to_be_scaled_T_2; // @[LocalAddr.scala:43:{62,83,91}]
wire _acc_waiting_to_be_scaled_T_4; // @[LocalAddr.scala:44:48]
wire _acc_waiting_to_be_scaled_T_5 = _acc_waiting_to_be_scaled_T_3 & _acc_waiting_to_be_scaled_T_4; // @[LocalAddr.scala:43:{83,96}, :44:48]
wire _acc_waiting_to_be_scaled_T_6 = ~_acc_waiting_to_be_scaled_T_5; // @[Scratchpad.scala:592:7]
wire _acc_waiting_to_be_scaled_T_7 = _write_scale_q_io_deq_valid & _acc_waiting_to_be_scaled_T_6; // @[Scratchpad.scala:253:31, :591:63, :592:7]
wire _acc_waiting_to_be_scaled_T_8 = _acc_waiting_to_be_scaled_T_7 & _write_scale_q_io_deq_bits_laddr_is_acc_addr; // @[Scratchpad.scala:253:31, :591:63, :592:53]
wire acc_waiting_to_be_scaled = _acc_waiting_to_be_scaled_T_8 & _write_issue_q_io_enq_ready; // @[Scratchpad.scala:254:31, :592:53, :593:51]
// Gate the normalizer-output <-> scale-unit-input handshake on that condition
// (ready and valid are both masked, so no beat transfers unless we are waiting to scale).
assign _acc_norm_unit_out_ready_T = _acc_scale_unit_io_in_ready & acc_waiting_to_be_scaled; // @[Scratchpad.scala:578:32, :593:51, :596:59]
assign acc_norm_unit_out_ready = _acc_norm_unit_out_ready_T; // @[Scratchpad.scala:596:59]
wire _acc_scale_unit_io_in_valid_T = acc_norm_unit_out_valid & acc_waiting_to_be_scaled; // @[Scratchpad.scala:593:51, :597:59]
wire _GEN_8 = _acc_scale_unit_io_in_ready & _acc_scale_unit_io_in_valid_T | _acc_waiting_to_be_scaled_T & _write_scale_q_io_deq_bits_laddr_read_full_acc_row & (&_write_scale_q_io_deq_bits_laddr_data) & _write_scale_q_io_deq_bits_laddr_garbage_bit | ~_write_scale_q_io_deq_bits_laddr_is_acc_addr; // @[Decoupled.scala:51:35]
// --- Readiness for accumulator-scale results headed back to the DMA writer (mvout path).
// dma_resp_ready: the DMA writer can take a request, the write_issue_q head is an
// accumulator address, and it is NOT the garbage local address (same LocalAddr.scala:43-44
// full-row / all-ones-data / garbage-bit pattern as above, negated at T_7).
wire _dma_resp_ready_T = _writer_io_req_ready & _write_issue_q_io_deq_bits_laddr_is_acc_addr; // @[Scratchpad.scala:192:26, :254:31, :607:34]
wire _dma_resp_ready_T_2 = _dma_resp_ready_T_1 & _write_issue_q_io_deq_bits_laddr_read_full_acc_row; // @[Scratchpad.scala:254:31]
wire _dma_resp_ready_T_3 = &_write_issue_q_io_deq_bits_laddr_data; // @[Scratchpad.scala:254:31]
wire _dma_resp_ready_T_4 = _dma_resp_ready_T_2 & _dma_resp_ready_T_3; // @[LocalAddr.scala:43:{62,83,91}]
wire _dma_resp_ready_T_5; // @[LocalAddr.scala:44:48]
wire _dma_resp_ready_T_6 = _dma_resp_ready_T_4 & _dma_resp_ready_T_5; // @[LocalAddr.scala:43:{83,96}, :44:48]
wire _dma_resp_ready_T_7 = ~_dma_resp_ready_T_6; // @[Scratchpad.scala:609:9]
wire dma_resp_ready = _dma_resp_ready_T & _dma_resp_ready_T_7; // @[Scratchpad.scala:607:34, :608:53, :609:9]
// _T_103: scale-unit output is tagged fromDMA and the DMA path can accept it; used below to
// select the scale unit as the writeData source.
wire _T_103 = _acc_scale_unit_io_out_bits_fromDMA & dma_resp_ready; // @[Scratchpad.scala:578:32, :608:53, :611:46]
assign writeData_valid = _T_103 ? _acc_scale_unit_io_out_valid : _T_101 | _T_99 | _T_97 | _p_io_out_ready_T_12 & _dma_read_pipe_p_io_out_valid | _writeData_valid_T_5; // @[Decoupled.scala:51:35]
wire [7:0] _writeData_bits_T; // @[Scratchpad.scala:615:58]
wire [7:0] _writeData_bits_T_1; // @[Scratchpad.scala:615:58]
wire [15:0] writeData_bits_lo_lo_lo = {_writeData_bits_T_1, _writeData_bits_T}; // @[Scratchpad.scala:615:58]
wire [7:0] _writeData_bits_T_2; // @[Scratchpad.scala:615:58]
wire [7:0] _writeData_bits_T_3; // @[Scratchpad.scala:615:58]
wire [15:0] writeData_bits_lo_lo_hi = {_writeData_bits_T_3, _writeData_bits_T_2}; // @[Scratchpad.scala:615:58]
wire [31:0] writeData_bits_lo_lo = {writeData_bits_lo_lo_hi, writeData_bits_lo_lo_lo}; // @[Scratchpad.scala:615:58]
wire [7:0] _writeData_bits_T_4; // @[Scratchpad.scala:615:58]
wire [7:0] _writeData_bits_T_5; // @[Scratchpad.scala:615:58]
wire [15:0] writeData_bits_lo_hi_lo = {_writeData_bits_T_5, _writeData_bits_T_4}; // @[Scratchpad.scala:615:58]
wire [7:0] _writeData_bits_T_6; // @[Scratchpad.scala:615:58]
wire [7:0] _writeData_bits_T_7; // @[Scratchpad.scala:615:58]
wire [15:0] writeData_bits_lo_hi_hi = {_writeData_bits_T_7, _writeData_bits_T_6}; // @[Scratchpad.scala:615:58]
wire [31:0] writeData_bits_lo_hi = {writeData_bits_lo_hi_hi, writeData_bits_lo_hi_lo}; // @[Scratchpad.scala:615:58]
wire [63:0] writeData_bits_lo = {writeData_bits_lo_hi, writeData_bits_lo_lo}; // @[Scratchpad.scala:615:58]
wire [7:0] _writeData_bits_T_8; // @[Scratchpad.scala:615:58]
wire [7:0] _writeData_bits_T_9; // @[Scratchpad.scala:615:58]
wire [15:0] writeData_bits_hi_lo_lo = {_writeData_bits_T_9, _writeData_bits_T_8}; // @[Scratchpad.scala:615:58]
wire [7:0] _writeData_bits_T_10; // @[Scratchpad.scala:615:58]
wire [7:0] _writeData_bits_T_11; // @[Scratchpad.scala:615:58]
wire [15:0] writeData_bits_hi_lo_hi = {_writeData_bits_T_11, _writeData_bits_T_10}; // @[Scratchpad.scala:615:58]
wire [31:0] writeData_bits_hi_lo = {writeData_bits_hi_lo_hi, writeData_bits_hi_lo_lo}; // @[Scratchpad.scala:615:58]
wire [7:0] _writeData_bits_T_12; // @[Scratchpad.scala:615:58]
wire [7:0] _writeData_bits_T_13; // @[Scratchpad.scala:615:58]
wire [15:0] writeData_bits_hi_hi_lo = {_writeData_bits_T_13, _writeData_bits_T_12}; // @[Scratchpad.scala:615:58]
wire [7:0] _writeData_bits_T_14; // @[Scratchpad.scala:615:58]
wire [7:0] _writeData_bits_T_15; // @[Scratchpad.scala:615:58]
wire [15:0] writeData_bits_hi_hi_hi = {_writeData_bits_T_15, _writeData_bits_T_14}; // @[Scratchpad.scala:615:58]
wire [31:0] writeData_bits_hi_hi = {writeData_bits_hi_hi_hi, writeData_bits_hi_hi_lo}; // @[Scratchpad.scala:615:58]
wire [63:0] writeData_bits_hi = {writeData_bits_hi_hi, writeData_bits_hi_lo}; // @[Scratchpad.scala:615:58]
wire [127:0] _writeData_bits_T_16 = {writeData_bits_hi, writeData_bits_lo}; // @[Scratchpad.scala:615:58]
assign writeData_bits = {384'h0, _T_103 ? _writeData_bits_T_16 : _T_101 ? _dma_read_pipe_p_3_io_out_bits_data : _T_99 ? _dma_read_pipe_p_2_io_out_bits_data : _T_97 ? _dma_read_pipe_p_1_io_out_bits_data : _dma_read_pipe_p_io_out_bits_data}; // @[Decoupled.scala:51:35]
wire [31:0] _fullAccWriteData_T; // @[Scratchpad.scala:616:64]
wire [31:0] _fullAccWriteData_T_1; // @[Scratchpad.scala:616:64]
wire [63:0] fullAccWriteData_lo_lo_lo = {_fullAccWriteData_T_1, _fullAccWriteData_T}; // @[Scratchpad.scala:616:64]
wire [31:0] _fullAccWriteData_T_2; // @[Scratchpad.scala:616:64]
wire [31:0] _fullAccWriteData_T_3; // @[Scratchpad.scala:616:64]
wire [63:0] fullAccWriteData_lo_lo_hi = {_fullAccWriteData_T_3, _fullAccWriteData_T_2}; // @[Scratchpad.scala:616:64]
wire [127:0] fullAccWriteData_lo_lo = {fullAccWriteData_lo_lo_hi, fullAccWriteData_lo_lo_lo}; // @[Scratchpad.scala:616:64]
wire [31:0] _fullAccWriteData_T_4; // @[Scratchpad.scala:616:64]
wire [31:0] _fullAccWriteData_T_5; // @[Scratchpad.scala:616:64]
wire [63:0] fullAccWriteData_lo_hi_lo = {_fullAccWriteData_T_5, _fullAccWriteData_T_4}; // @[Scratchpad.scala:616:64]
wire [31:0] _fullAccWriteData_T_6; // @[Scratchpad.scala:616:64]
wire [31:0] _fullAccWriteData_T_7; // @[Scratchpad.scala:616:64]
wire [63:0] fullAccWriteData_lo_hi_hi = {_fullAccWriteData_T_7, _fullAccWriteData_T_6}; // @[Scratchpad.scala:616:64]
wire [127:0] fullAccWriteData_lo_hi = {fullAccWriteData_lo_hi_hi, fullAccWriteData_lo_hi_lo}; // @[Scratchpad.scala:616:64]
wire [255:0] fullAccWriteData_lo = {fullAccWriteData_lo_hi, fullAccWriteData_lo_lo}; // @[Scratchpad.scala:616:64]
wire [31:0] _fullAccWriteData_T_8; // @[Scratchpad.scala:616:64]
wire [31:0] _fullAccWriteData_T_9; // @[Scratchpad.scala:616:64]
wire [63:0] fullAccWriteData_hi_lo_lo = {_fullAccWriteData_T_9, _fullAccWriteData_T_8}; // @[Scratchpad.scala:616:64]
wire [31:0] _fullAccWriteData_T_10; // @[Scratchpad.scala:616:64]
wire [31:0] _fullAccWriteData_T_11; // @[Scratchpad.scala:616:64]
wire [63:0] fullAccWriteData_hi_lo_hi = {_fullAccWriteData_T_11, _fullAccWriteData_T_10}; // @[Scratchpad.scala:616:64]
wire [127:0] fullAccWriteData_hi_lo = {fullAccWriteData_hi_lo_hi, fullAccWriteData_hi_lo_lo}; // @[Scratchpad.scala:616:64]
wire [31:0] _fullAccWriteData_T_12; // @[Scratchpad.scala:616:64]
wire [31:0] _fullAccWriteData_T_13; // @[Scratchpad.scala:616:64]
wire [63:0] fullAccWriteData_hi_hi_lo = {_fullAccWriteData_T_13, _fullAccWriteData_T_12}; // @[Scratchpad.scala:616:64]
wire [31:0] _fullAccWriteData_T_14; // @[Scratchpad.scala:616:64]
wire [31:0] _fullAccWriteData_T_15; // @[Scratchpad.scala:616:64]
wire [63:0] fullAccWriteData_hi_hi_hi = {_fullAccWriteData_T_15, _fullAccWriteData_T_14}; // @[Scratchpad.scala:616:64]
wire [127:0] fullAccWriteData_hi_hi = {fullAccWriteData_hi_hi_hi, fullAccWriteData_hi_hi_lo}; // @[Scratchpad.scala:616:64]
wire [255:0] fullAccWriteData_hi = {fullAccWriteData_hi_hi, fullAccWriteData_hi_lo}; // @[Scratchpad.scala:616:64]
assign _fullAccWriteData_T_16 = {fullAccWriteData_hi, fullAccWriteData_lo}; // @[Scratchpad.scala:616:64]
assign fullAccWriteData = _fullAccWriteData_T_16; // @[Scratchpad.scala:288:32, :616:64]
  // Drive the full_data fields of both accumulator read-response ports.
  // Each 32-bit full_data lane coming out of the acc scale unit is truncated
  // to its low byte ([7:0]); the same lane value fans out to resp port 0 and
  // resp port 1 (16 lanes, lane index 0..15).
  assign io_acc_read_resp_0_bits_full_data_0_0_0 = _acc_scale_unit_io_out_bits_full_data_0_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_1_bits_full_data_0_0_0 = _acc_scale_unit_io_out_bits_full_data_0_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_0_bits_full_data_1_0_0 = _acc_scale_unit_io_out_bits_full_data_1_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_1_bits_full_data_1_0_0 = _acc_scale_unit_io_out_bits_full_data_1_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_0_bits_full_data_2_0_0 = _acc_scale_unit_io_out_bits_full_data_2_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_1_bits_full_data_2_0_0 = _acc_scale_unit_io_out_bits_full_data_2_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_0_bits_full_data_3_0_0 = _acc_scale_unit_io_out_bits_full_data_3_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_1_bits_full_data_3_0_0 = _acc_scale_unit_io_out_bits_full_data_3_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_0_bits_full_data_4_0_0 = _acc_scale_unit_io_out_bits_full_data_4_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_1_bits_full_data_4_0_0 = _acc_scale_unit_io_out_bits_full_data_4_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_0_bits_full_data_5_0_0 = _acc_scale_unit_io_out_bits_full_data_5_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_1_bits_full_data_5_0_0 = _acc_scale_unit_io_out_bits_full_data_5_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_0_bits_full_data_6_0_0 = _acc_scale_unit_io_out_bits_full_data_6_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_1_bits_full_data_6_0_0 = _acc_scale_unit_io_out_bits_full_data_6_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_0_bits_full_data_7_0_0 = _acc_scale_unit_io_out_bits_full_data_7_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_1_bits_full_data_7_0_0 = _acc_scale_unit_io_out_bits_full_data_7_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_0_bits_full_data_8_0_0 = _acc_scale_unit_io_out_bits_full_data_8_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_1_bits_full_data_8_0_0 = _acc_scale_unit_io_out_bits_full_data_8_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_0_bits_full_data_9_0_0 = _acc_scale_unit_io_out_bits_full_data_9_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_1_bits_full_data_9_0_0 = _acc_scale_unit_io_out_bits_full_data_9_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_0_bits_full_data_10_0_0 = _acc_scale_unit_io_out_bits_full_data_10_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_1_bits_full_data_10_0_0 = _acc_scale_unit_io_out_bits_full_data_10_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_0_bits_full_data_11_0_0 = _acc_scale_unit_io_out_bits_full_data_11_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_1_bits_full_data_11_0_0 = _acc_scale_unit_io_out_bits_full_data_11_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_0_bits_full_data_12_0_0 = _acc_scale_unit_io_out_bits_full_data_12_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_1_bits_full_data_12_0_0 = _acc_scale_unit_io_out_bits_full_data_12_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_0_bits_full_data_13_0_0 = _acc_scale_unit_io_out_bits_full_data_13_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_1_bits_full_data_13_0_0 = _acc_scale_unit_io_out_bits_full_data_13_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_0_bits_full_data_14_0_0 = _acc_scale_unit_io_out_bits_full_data_14_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_1_bits_full_data_14_0_0 = _acc_scale_unit_io_out_bits_full_data_14_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_0_bits_full_data_15_0_0 = _acc_scale_unit_io_out_bits_full_data_15_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  assign io_acc_read_resp_1_bits_full_data_15_0_0 = _acc_scale_unit_io_out_bits_full_data_15_0[7:0]; // @[Scratchpad.scala:205:9, :578:32, :621:33]
  // Drive the (scaled) data fields of both accumulator read-response ports.
  // Each 8-bit scale-unit output lane is sign-extended to 32 bits by
  // replicating its MSB (bit 7) 24 times; the shared extension net
  // (_GEN_9 .. _GEN_24, one per lane 0..15) fans out to resp ports 0 and 1.
  wire [31:0] _GEN_9 = {{24{_acc_scale_unit_io_out_bits_data_0_0[7]}}, _acc_scale_unit_io_out_bits_data_0_0}; // @[Scratchpad.scala:578:32, :621:33]
  assign io_acc_read_resp_0_bits_data_0_0_0 = _GEN_9; // @[Scratchpad.scala:205:9, :621:33]
  assign io_acc_read_resp_1_bits_data_0_0_0 = _GEN_9; // @[Scratchpad.scala:205:9, :621:33]
  wire [31:0] _GEN_10 = {{24{_acc_scale_unit_io_out_bits_data_1_0[7]}}, _acc_scale_unit_io_out_bits_data_1_0}; // @[Scratchpad.scala:578:32, :621:33]
  assign io_acc_read_resp_0_bits_data_1_0_0 = _GEN_10; // @[Scratchpad.scala:205:9, :621:33]
  assign io_acc_read_resp_1_bits_data_1_0_0 = _GEN_10; // @[Scratchpad.scala:205:9, :621:33]
  wire [31:0] _GEN_11 = {{24{_acc_scale_unit_io_out_bits_data_2_0[7]}}, _acc_scale_unit_io_out_bits_data_2_0}; // @[Scratchpad.scala:578:32, :621:33]
  assign io_acc_read_resp_0_bits_data_2_0_0 = _GEN_11; // @[Scratchpad.scala:205:9, :621:33]
  assign io_acc_read_resp_1_bits_data_2_0_0 = _GEN_11; // @[Scratchpad.scala:205:9, :621:33]
  wire [31:0] _GEN_12 = {{24{_acc_scale_unit_io_out_bits_data_3_0[7]}}, _acc_scale_unit_io_out_bits_data_3_0}; // @[Scratchpad.scala:578:32, :621:33]
  assign io_acc_read_resp_0_bits_data_3_0_0 = _GEN_12; // @[Scratchpad.scala:205:9, :621:33]
  assign io_acc_read_resp_1_bits_data_3_0_0 = _GEN_12; // @[Scratchpad.scala:205:9, :621:33]
  wire [31:0] _GEN_13 = {{24{_acc_scale_unit_io_out_bits_data_4_0[7]}}, _acc_scale_unit_io_out_bits_data_4_0}; // @[Scratchpad.scala:578:32, :621:33]
  assign io_acc_read_resp_0_bits_data_4_0_0 = _GEN_13; // @[Scratchpad.scala:205:9, :621:33]
  assign io_acc_read_resp_1_bits_data_4_0_0 = _GEN_13; // @[Scratchpad.scala:205:9, :621:33]
  wire [31:0] _GEN_14 = {{24{_acc_scale_unit_io_out_bits_data_5_0[7]}}, _acc_scale_unit_io_out_bits_data_5_0}; // @[Scratchpad.scala:578:32, :621:33]
  assign io_acc_read_resp_0_bits_data_5_0_0 = _GEN_14; // @[Scratchpad.scala:205:9, :621:33]
  assign io_acc_read_resp_1_bits_data_5_0_0 = _GEN_14; // @[Scratchpad.scala:205:9, :621:33]
  wire [31:0] _GEN_15 = {{24{_acc_scale_unit_io_out_bits_data_6_0[7]}}, _acc_scale_unit_io_out_bits_data_6_0}; // @[Scratchpad.scala:578:32, :621:33]
  assign io_acc_read_resp_0_bits_data_6_0_0 = _GEN_15; // @[Scratchpad.scala:205:9, :621:33]
  assign io_acc_read_resp_1_bits_data_6_0_0 = _GEN_15; // @[Scratchpad.scala:205:9, :621:33]
  wire [31:0] _GEN_16 = {{24{_acc_scale_unit_io_out_bits_data_7_0[7]}}, _acc_scale_unit_io_out_bits_data_7_0}; // @[Scratchpad.scala:578:32, :621:33]
  assign io_acc_read_resp_0_bits_data_7_0_0 = _GEN_16; // @[Scratchpad.scala:205:9, :621:33]
  assign io_acc_read_resp_1_bits_data_7_0_0 = _GEN_16; // @[Scratchpad.scala:205:9, :621:33]
  wire [31:0] _GEN_17 = {{24{_acc_scale_unit_io_out_bits_data_8_0[7]}}, _acc_scale_unit_io_out_bits_data_8_0}; // @[Scratchpad.scala:578:32, :621:33]
  assign io_acc_read_resp_0_bits_data_8_0_0 = _GEN_17; // @[Scratchpad.scala:205:9, :621:33]
  assign io_acc_read_resp_1_bits_data_8_0_0 = _GEN_17; // @[Scratchpad.scala:205:9, :621:33]
  wire [31:0] _GEN_18 = {{24{_acc_scale_unit_io_out_bits_data_9_0[7]}}, _acc_scale_unit_io_out_bits_data_9_0}; // @[Scratchpad.scala:578:32, :621:33]
  assign io_acc_read_resp_0_bits_data_9_0_0 = _GEN_18; // @[Scratchpad.scala:205:9, :621:33]
  assign io_acc_read_resp_1_bits_data_9_0_0 = _GEN_18; // @[Scratchpad.scala:205:9, :621:33]
  wire [31:0] _GEN_19 = {{24{_acc_scale_unit_io_out_bits_data_10_0[7]}}, _acc_scale_unit_io_out_bits_data_10_0}; // @[Scratchpad.scala:578:32, :621:33]
  assign io_acc_read_resp_0_bits_data_10_0_0 = _GEN_19; // @[Scratchpad.scala:205:9, :621:33]
  assign io_acc_read_resp_1_bits_data_10_0_0 = _GEN_19; // @[Scratchpad.scala:205:9, :621:33]
  wire [31:0] _GEN_20 = {{24{_acc_scale_unit_io_out_bits_data_11_0[7]}}, _acc_scale_unit_io_out_bits_data_11_0}; // @[Scratchpad.scala:578:32, :621:33]
  assign io_acc_read_resp_0_bits_data_11_0_0 = _GEN_20; // @[Scratchpad.scala:205:9, :621:33]
  assign io_acc_read_resp_1_bits_data_11_0_0 = _GEN_20; // @[Scratchpad.scala:205:9, :621:33]
  wire [31:0] _GEN_21 = {{24{_acc_scale_unit_io_out_bits_data_12_0[7]}}, _acc_scale_unit_io_out_bits_data_12_0}; // @[Scratchpad.scala:578:32, :621:33]
  assign io_acc_read_resp_0_bits_data_12_0_0 = _GEN_21; // @[Scratchpad.scala:205:9, :621:33]
  assign io_acc_read_resp_1_bits_data_12_0_0 = _GEN_21; // @[Scratchpad.scala:205:9, :621:33]
  wire [31:0] _GEN_22 = {{24{_acc_scale_unit_io_out_bits_data_13_0[7]}}, _acc_scale_unit_io_out_bits_data_13_0}; // @[Scratchpad.scala:578:32, :621:33]
  assign io_acc_read_resp_0_bits_data_13_0_0 = _GEN_22; // @[Scratchpad.scala:205:9, :621:33]
  assign io_acc_read_resp_1_bits_data_13_0_0 = _GEN_22; // @[Scratchpad.scala:205:9, :621:33]
  wire [31:0] _GEN_23 = {{24{_acc_scale_unit_io_out_bits_data_14_0[7]}}, _acc_scale_unit_io_out_bits_data_14_0}; // @[Scratchpad.scala:578:32, :621:33]
  assign io_acc_read_resp_0_bits_data_14_0_0 = _GEN_23; // @[Scratchpad.scala:205:9, :621:33]
  assign io_acc_read_resp_1_bits_data_14_0_0 = _GEN_23; // @[Scratchpad.scala:205:9, :621:33]
  wire [31:0] _GEN_24 = {{24{_acc_scale_unit_io_out_bits_data_15_0[7]}}, _acc_scale_unit_io_out_bits_data_15_0}; // @[Scratchpad.scala:578:32, :621:33]
  assign io_acc_read_resp_0_bits_data_15_0_0 = _GEN_24; // @[Scratchpad.scala:205:9, :621:33]
  assign io_acc_read_resp_1_bits_data_15_0_0 = _GEN_24; // @[Scratchpad.scala:205:9, :621:33]
  // Response-valid gating: a read response is valid on port N only when the
  // scale-unit output is valid, is NOT destined for the DMA path (fromDMA is
  // low), and its acc_bank_id decodes to bank N (2'h0 for port 0, 2'h1 for
  // port 1).
  wire _T_106 = ~_acc_scale_unit_io_out_bits_fromDMA & _acc_scale_unit_io_out_bits_acc_bank_id == 2'h0; // @[Scratchpad.scala:578:32, :622:{13,49,91}]
  assign io_acc_read_resp_0_valid_0 = _T_106 & _acc_scale_unit_io_out_valid; // @[Scratchpad.scala:205:9, :578:32, :620:33, :622:{49,100}, :624:35]
  wire _T_109 = ~_acc_scale_unit_io_out_bits_fromDMA & _acc_scale_unit_io_out_bits_acc_bank_id == 2'h1; // @[Scratchpad.scala:578:32, :622:{13,49,91}]
  assign io_acc_read_resp_1_valid_0 = _T_109 & _acc_scale_unit_io_out_valid; // @[Scratchpad.scala:205:9, :578:32, :620:33, :622:{49,100}, :624:35]
  // Request-ready passthrough and forward declarations. The per-bank
  // read-request ready signals are forwarded straight to the external
  // io_acc_read_req ports. The _T_ wires declared here (read_req valid and
  // the muxed write addr/acc selections, per bank) are assigned elsewhere in
  // the module.
  assign io_acc_read_req_0_ready_0 = bank_ios_1_0_read_req_ready; // @[Scratchpad.scala:205:9, :637:29]
  wire _bank_ios_0_read_req_valid_T_1; // @[Scratchpad.scala:661:38]
  wire [8:0] _bank_ios_0_write_bits_addr_T_3; // @[Mux.scala:126:16]
  wire _bank_ios_0_write_bits_acc_T_2; // @[Mux.scala:126:16]
  assign io_acc_read_req_1_ready_0 = bank_ios_1_1_read_req_ready; // @[Scratchpad.scala:205:9, :637:29]
  wire _bank_ios_1_read_req_valid_T_1; // @[Scratchpad.scala:661:38]
  wire [8:0] _bank_ios_1_write_bits_addr_T_3; // @[Mux.scala:126:16]
  wire _bank_ios_1_write_bits_acc_T_2; // @[Mux.scala:126:16]
  // Port wires for accumulator bank 0 (bank_ios_1_0): read-request fields,
  // read-response fields (16 x 32-bit data lanes plus scale/activation
  // metadata), the write channel (16 x 32-bit data lanes, a 64-bit byte mask,
  // 9-bit address, accumulate flag, handshake), and the external adder
  // interface (16-lane op1/op2/sum plus valid). Connections are made
  // elsewhere in the module.
  // -- read request --
  wire [31:0] bank_ios_1_0_read_req_bits_scale_bits; // @[Scratchpad.scala:637:29]
  wire [8:0] bank_ios_1_0_read_req_bits_addr; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_req_bits_igelu_qb; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_req_bits_igelu_qc; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_req_bits_iexp_qln2; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_req_bits_iexp_qln2_inv; // @[Scratchpad.scala:637:29]
  wire [2:0] bank_ios_1_0_read_req_bits_act; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_read_req_bits_full; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_read_req_bits_fromDMA; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_read_req_valid; // @[Scratchpad.scala:637:29]
  // -- read response --
  wire [31:0] bank_ios_1_0_read_resp_bits_data_0_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_data_1_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_data_2_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_data_3_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_data_4_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_data_5_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_data_6_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_data_7_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_data_8_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_data_9_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_data_10_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_data_11_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_data_12_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_data_13_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_data_14_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_data_15_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_scale_bits; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_read_resp_bits_fromDMA; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_igelu_qb; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_igelu_qc; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_iexp_qln2; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_read_resp_bits_iexp_qln2_inv; // @[Scratchpad.scala:637:29]
  wire [2:0] bank_ios_1_0_read_resp_bits_act; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_read_resp_ready; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_read_resp_valid; // @[Scratchpad.scala:637:29]
  // -- write channel --
  wire [31:0] bank_ios_1_0_write_bits_data_0_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_write_bits_data_1_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_write_bits_data_2_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_write_bits_data_3_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_write_bits_data_4_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_write_bits_data_5_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_write_bits_data_6_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_write_bits_data_7_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_write_bits_data_8_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_write_bits_data_9_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_write_bits_data_10_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_write_bits_data_11_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_write_bits_data_12_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_write_bits_data_13_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_write_bits_data_14_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_write_bits_data_15_0; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_0; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_1; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_2; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_3; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_4; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_5; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_6; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_7; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_8; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_9; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_10; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_11; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_12; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_13; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_14; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_15; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_16; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_17; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_18; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_19; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_20; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_21; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_22; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_23; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_24; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_25; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_26; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_27; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_28; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_29; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_30; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_31; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_32; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_33; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_34; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_35; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_36; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_37; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_38; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_39; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_40; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_41; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_42; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_43; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_44; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_45; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_46; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_47; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_48; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_49; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_50; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_51; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_52; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_53; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_54; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_55; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_56; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_57; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_58; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_59; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_60; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_61; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_62; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_mask_63; // @[Scratchpad.scala:637:29]
  wire [8:0] bank_ios_1_0_write_bits_addr; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_bits_acc; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_ready; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_write_valid; // @[Scratchpad.scala:637:29]
  // -- adder interface --
  wire [31:0] bank_ios_1_0_adder_op1_0_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op1_1_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op1_2_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op1_3_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op1_4_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op1_5_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op1_6_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op1_7_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op1_8_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op1_9_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op1_10_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op1_11_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op1_12_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op1_13_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op1_14_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op1_15_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op2_0_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op2_1_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op2_2_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op2_3_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op2_4_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op2_5_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op2_6_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op2_7_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op2_8_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op2_9_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op2_10_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op2_11_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op2_12_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op2_13_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op2_14_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_op2_15_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_sum_0_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_sum_1_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_sum_2_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_sum_3_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_sum_4_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_sum_5_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_sum_6_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_sum_7_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_sum_8_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_sum_9_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_sum_10_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_sum_11_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_sum_12_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_sum_13_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_sum_14_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_0_adder_sum_15_0; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_0_adder_valid; // @[Scratchpad.scala:637:29]
  // Port wires for accumulator bank 1 (bank_ios_1_1); same structure as
  // bank 0 above: read-request fields, read-response fields, write channel,
  // and the adder interface. NOTE(review): this declaration run continues
  // past the end of this file chunk (adder_sum lanes 9..15 and adder_valid
  // follow the same pattern below).
  // -- read request --
  wire [31:0] bank_ios_1_1_read_req_bits_scale_bits; // @[Scratchpad.scala:637:29]
  wire [8:0] bank_ios_1_1_read_req_bits_addr; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_req_bits_igelu_qb; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_req_bits_igelu_qc; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_req_bits_iexp_qln2; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_req_bits_iexp_qln2_inv; // @[Scratchpad.scala:637:29]
  wire [2:0] bank_ios_1_1_read_req_bits_act; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_read_req_bits_full; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_read_req_bits_fromDMA; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_read_req_valid; // @[Scratchpad.scala:637:29]
  // -- read response --
  wire [31:0] bank_ios_1_1_read_resp_bits_data_0_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_data_1_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_data_2_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_data_3_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_data_4_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_data_5_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_data_6_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_data_7_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_data_8_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_data_9_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_data_10_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_data_11_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_data_12_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_data_13_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_data_14_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_data_15_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_scale_bits; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_read_resp_bits_fromDMA; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_igelu_qb; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_igelu_qc; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_iexp_qln2; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_read_resp_bits_iexp_qln2_inv; // @[Scratchpad.scala:637:29]
  wire [2:0] bank_ios_1_1_read_resp_bits_act; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_read_resp_ready; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_read_resp_valid; // @[Scratchpad.scala:637:29]
  // -- write channel --
  wire [31:0] bank_ios_1_1_write_bits_data_0_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_write_bits_data_1_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_write_bits_data_2_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_write_bits_data_3_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_write_bits_data_4_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_write_bits_data_5_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_write_bits_data_6_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_write_bits_data_7_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_write_bits_data_8_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_write_bits_data_9_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_write_bits_data_10_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_write_bits_data_11_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_write_bits_data_12_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_write_bits_data_13_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_write_bits_data_14_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_write_bits_data_15_0; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_0; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_1; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_2; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_3; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_4; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_5; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_6; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_7; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_8; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_9; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_10; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_11; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_12; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_13; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_14; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_15; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_16; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_17; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_18; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_19; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_20; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_21; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_22; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_23; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_24; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_25; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_26; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_27; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_28; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_29; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_30; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_31; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_32; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_33; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_34; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_35; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_36; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_37; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_38; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_39; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_40; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_41; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_42; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_43; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_44; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_45; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_46; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_47; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_48; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_49; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_50; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_51; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_52; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_53; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_54; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_55; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_56; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_57; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_58; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_59; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_60; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_61; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_62; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_mask_63; // @[Scratchpad.scala:637:29]
  wire [8:0] bank_ios_1_1_write_bits_addr; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_bits_acc; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_ready; // @[Scratchpad.scala:637:29]
  wire bank_ios_1_1_write_valid; // @[Scratchpad.scala:637:29]
  // -- adder interface --
  wire [31:0] bank_ios_1_1_adder_op1_0_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op1_1_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op1_2_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op1_3_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op1_4_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op1_5_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op1_6_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op1_7_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op1_8_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op1_9_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op1_10_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op1_11_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op1_12_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op1_13_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op1_14_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op1_15_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op2_0_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op2_1_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op2_2_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op2_3_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op2_4_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op2_5_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op2_6_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op2_7_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op2_8_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op2_9_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op2_10_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op2_11_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op2_12_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op2_13_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op2_14_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_op2_15_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_sum_0_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_sum_1_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_sum_2_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_sum_3_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_sum_4_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_sum_5_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_sum_6_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_sum_7_0; // @[Scratchpad.scala:637:29]
  wire [31:0] bank_ios_1_1_adder_sum_8_0; // @[Scratchpad.scala:637:29]
wire [31:0] bank_ios_1_1_adder_sum_9_0; // @[Scratchpad.scala:637:29]
wire [31:0] bank_ios_1_1_adder_sum_10_0; // @[Scratchpad.scala:637:29]
wire [31:0] bank_ios_1_1_adder_sum_11_0; // @[Scratchpad.scala:637:29]
wire [31:0] bank_ios_1_1_adder_sum_12_0; // @[Scratchpad.scala:637:29]
wire [31:0] bank_ios_1_1_adder_sum_13_0; // @[Scratchpad.scala:637:29]
wire [31:0] bank_ios_1_1_adder_sum_14_0; // @[Scratchpad.scala:637:29]
wire [31:0] bank_ios_1_1_adder_sum_15_0; // @[Scratchpad.scala:637:29]
wire bank_ios_1_1_adder_valid; // @[Scratchpad.scala:637:29]
wire _bank_issued_io_T = _write_issue_q_io_deq_bits_laddr_data[9]; // @[Scratchpad.scala:254:31]
wire _dmawrite_T_66 = _dmawrite_T_65 & _write_dispatch_q_q_io_deq_bits_laddr_read_full_acc_row; // @[Decoupled.scala:362:21]
wire _dmawrite_T_67 = &_write_dispatch_q_q_io_deq_bits_laddr_data; // @[Decoupled.scala:362:21]
wire _dmawrite_T_68 = _dmawrite_T_66 & _dmawrite_T_67; // @[LocalAddr.scala:43:{62,83,91}]
wire _dmawrite_T_69; // @[LocalAddr.scala:44:48]
wire _dmawrite_T_70 = _dmawrite_T_68 & _dmawrite_T_69; // @[LocalAddr.scala:43:{83,96}, :44:48]
wire _dmawrite_T_71 = ~_dmawrite_T_70; // @[Scratchpad.scala:658:11]
wire _dmawrite_T_72 = _dmawrite_T_64 & _dmawrite_T_71; // @[Scratchpad.scala:657:{47,76}, :658:11]
wire _dmawrite_T_73 = _dmawrite_T_72 & _write_dispatch_q_q_io_deq_bits_laddr_is_acc_addr; // @[Decoupled.scala:362:21]
wire _dmawrite_T_74 = _write_dispatch_q_q_io_deq_bits_laddr_data[9]; // @[Decoupled.scala:362:21]
wire _dmawrite_T_86 = _write_dispatch_q_q_io_deq_bits_laddr_data[9]; // @[Decoupled.scala:362:21]
wire _dmawrite_T_75 = ~_dmawrite_T_74; // @[Scratchpad.scala:659:93]
wire dmawrite_4 = _dmawrite_T_73 & _dmawrite_T_75; // @[Scratchpad.scala:658:53, :659:{51,93}]
assign _bank_ios_0_read_req_valid_T_1 = io_acc_read_req_0_valid_0 | dmawrite_4; // @[Scratchpad.scala:205:9, :659:51, :661:38]
assign bank_ios_1_0_read_req_valid = _bank_ios_0_read_req_valid_T_1; // @[Scratchpad.scala:637:29, :661:38]
wire [8:0] _bank_ios_0_read_req_bits_addr_T_1 = _write_dispatch_q_q_io_deq_bits_laddr_data[8:0]; // @[Decoupled.scala:362:21]
wire [8:0] _bank_ios_1_read_req_bits_addr_T_1 = _write_dispatch_q_q_io_deq_bits_laddr_data[8:0]; // @[Decoupled.scala:362:21]
wire [31:0] _bank_ios_0_read_req_bits_igelu_qb_T; // @[Scratchpad.scala:679:84]
wire [31:0] _bank_ios_0_read_req_bits_igelu_qb_WIRE = _bank_ios_0_read_req_bits_igelu_qb_T; // @[Scratchpad.scala:679:84]
wire [31:0] _bank_ios_0_read_req_bits_igelu_qc_T; // @[Scratchpad.scala:680:84]
wire [31:0] _bank_ios_0_read_req_bits_igelu_qc_WIRE = _bank_ios_0_read_req_bits_igelu_qc_T; // @[Scratchpad.scala:680:84]
wire [31:0] _bank_ios_0_read_req_bits_iexp_qln2_T; // @[Scratchpad.scala:681:86]
wire [31:0] _bank_ios_0_read_req_bits_iexp_qln2_WIRE = _bank_ios_0_read_req_bits_iexp_qln2_T; // @[Scratchpad.scala:681:86]
wire [31:0] _bank_ios_0_read_req_bits_iexp_qln2_inv_T; // @[Scratchpad.scala:682:94]
wire [31:0] _bank_ios_0_read_req_bits_iexp_qln2_inv_WIRE = _bank_ios_0_read_req_bits_iexp_qln2_inv_T; // @[Scratchpad.scala:682:94]
wire [31:0] _bank_ios_0_read_req_bits_scale_T; // @[Scratchpad.scala:683:78]
wire [31:0] _bank_ios_0_read_req_bits_scale_WIRE_1; // @[Scratchpad.scala:683:78]
assign _bank_ios_0_read_req_bits_scale_T = _bank_ios_0_read_req_bits_scale_WIRE_1; // @[Scratchpad.scala:683:78]
wire [31:0] _bank_ios_0_read_req_bits_scale_WIRE_bits = _bank_ios_0_read_req_bits_scale_T; // @[Scratchpad.scala:683:78]
wire _GEN_25 = ~io_acc_read_req_0_valid_0 & dmawrite_4 & bank_ios_1_0_read_req_ready & bank_ios_1_0_read_req_valid; // @[Decoupled.scala:51:35]
assign bank_ios_1_0_read_req_bits_fromDMA = ~io_acc_read_req_0_valid_0; // @[Scratchpad.scala:205:9, :637:29, :665:23, :674:37, :675:31]
assign bank_ios_1_0_read_req_bits_full = ~io_acc_read_req_0_valid_0 & _write_dispatch_q_q_io_deq_bits_laddr_read_full_acc_row; // @[Decoupled.scala:362:21]
assign bank_ios_1_0_read_req_bits_act = io_acc_read_req_0_valid_0 ? io_acc_read_req_0_bits_act_0 : _write_dispatch_q_q_io_deq_bits_acc_act; // @[Decoupled.scala:362:21]
assign bank_ios_1_0_read_req_bits_iexp_qln2_inv = io_acc_read_req_0_valid_0 ? 32'h0 : _bank_ios_0_read_req_bits_iexp_qln2_inv_WIRE; // @[Scratchpad.scala:205:9, :637:29, :665:23, :671:43, :675:31, :682:94]
assign bank_ios_1_0_read_req_bits_iexp_qln2 = io_acc_read_req_0_valid_0 ? 32'h0 : _bank_ios_0_read_req_bits_iexp_qln2_WIRE; // @[Scratchpad.scala:205:9, :637:29, :665:23, :670:39, :675:31, :681:86]
assign bank_ios_1_0_read_req_bits_igelu_qc = io_acc_read_req_0_valid_0 ? 32'h0 : _bank_ios_0_read_req_bits_igelu_qc_WIRE; // @[Scratchpad.scala:205:9, :637:29, :665:23, :669:38, :675:31, :680:84]
assign bank_ios_1_0_read_req_bits_igelu_qb = io_acc_read_req_0_valid_0 ? 32'h0 : _bank_ios_0_read_req_bits_igelu_qb_WIRE; // @[Scratchpad.scala:205:9, :637:29, :665:23, :668:38, :675:31, :679:84]
assign bank_ios_1_0_read_req_bits_addr = io_acc_read_req_0_valid_0 ? io_acc_read_req_0_bits_addr_0 : _bank_ios_0_read_req_bits_addr_T_1; // @[Scratchpad.scala:205:9, :637:29, :665:23, :666:34, :675:31]
assign bank_ios_1_0_read_req_bits_scale_bits = io_acc_read_req_0_valid_0 ? io_acc_read_req_0_bits_scale_bits_0 : _bank_ios_0_read_req_bits_scale_WIRE_bits; // @[Scratchpad.scala:205:9, :637:29, :665:23, :672:35, :675:31, :683:78]
wire _T_127 = _write_norm_q_io_deq_valid & _norm_unit_passthru_q_io_enq_ready; // @[Scratchpad.scala:252:30, :697:41]
assign bank_ios_1_0_read_resp_ready = _T_127 & bank_ios_1_0_read_resp_valid & _write_scale_q_io_enq_ready & _write_norm_q_io_deq_bits_laddr_is_acc_addr & ~(_T_131 & _write_norm_q_io_deq_bits_laddr_read_full_acc_row & (&_write_norm_q_io_deq_bits_laddr_data) & _write_norm_q_io_deq_bits_laddr_garbage_bit) & ~(_write_norm_q_io_deq_bits_laddr_data[9]); // @[Scratchpad.scala:252:30, :253:31, :637:29, :697:41, :698:34, :699:31, :700:38, :701:54, :702:{11,56}, :703:53]
wire _GEN_26 = _write_norm_q_io_deq_bits_laddr_norm_cmd == 3'h0; // @[Scratchpad.scala:252:30]
wire _write_scale_q_io_enq_valid_T; // @[NormCmd.scala:11:9]
assign _write_scale_q_io_enq_valid_T = _GEN_26; // @[NormCmd.scala:11:9]
wire _write_scale_q_io_enq_valid_T_1; // @[NormCmd.scala:11:9]
assign _write_scale_q_io_enq_valid_T_1 = _GEN_26; // @[NormCmd.scala:11:9]
wire _dmawrite_T_78 = _dmawrite_T_77 & _write_dispatch_q_q_io_deq_bits_laddr_read_full_acc_row; // @[Decoupled.scala:362:21]
wire _dmawrite_T_79 = &_write_dispatch_q_q_io_deq_bits_laddr_data; // @[Decoupled.scala:362:21]
wire _dmawrite_T_80 = _dmawrite_T_78 & _dmawrite_T_79; // @[LocalAddr.scala:43:{62,83,91}]
wire _dmawrite_T_81; // @[LocalAddr.scala:44:48]
wire _dmawrite_T_82 = _dmawrite_T_80 & _dmawrite_T_81; // @[LocalAddr.scala:43:{83,96}, :44:48]
wire _dmawrite_T_83 = ~_dmawrite_T_82; // @[Scratchpad.scala:658:11]
wire _dmawrite_T_84 = _dmawrite_T_76 & _dmawrite_T_83; // @[Scratchpad.scala:657:{47,76}, :658:11]
wire _dmawrite_T_85 = _dmawrite_T_84 & _write_dispatch_q_q_io_deq_bits_laddr_is_acc_addr; // @[Decoupled.scala:362:21]
wire _dmawrite_T_87 = _dmawrite_T_86; // @[Scratchpad.scala:659:93]
wire dmawrite_5 = _dmawrite_T_85 & _dmawrite_T_87; // @[Scratchpad.scala:658:53, :659:{51,93}]
assign _bank_ios_1_read_req_valid_T_1 = io_acc_read_req_1_valid_0 | dmawrite_5; // @[Scratchpad.scala:205:9, :659:51, :661:38]
assign bank_ios_1_1_read_req_valid = _bank_ios_1_read_req_valid_T_1; // @[Scratchpad.scala:637:29, :661:38]
wire [31:0] _bank_ios_1_read_req_bits_igelu_qb_T; // @[Scratchpad.scala:679:84]
wire [31:0] _bank_ios_1_read_req_bits_igelu_qb_WIRE = _bank_ios_1_read_req_bits_igelu_qb_T; // @[Scratchpad.scala:679:84]
wire [31:0] _bank_ios_1_read_req_bits_igelu_qc_T; // @[Scratchpad.scala:680:84]
wire [31:0] _bank_ios_1_read_req_bits_igelu_qc_WIRE = _bank_ios_1_read_req_bits_igelu_qc_T; // @[Scratchpad.scala:680:84]
wire [31:0] _bank_ios_1_read_req_bits_iexp_qln2_T; // @[Scratchpad.scala:681:86]
wire [31:0] _bank_ios_1_read_req_bits_iexp_qln2_WIRE = _bank_ios_1_read_req_bits_iexp_qln2_T; // @[Scratchpad.scala:681:86]
wire [31:0] _bank_ios_1_read_req_bits_iexp_qln2_inv_T; // @[Scratchpad.scala:682:94]
wire [31:0] _bank_ios_1_read_req_bits_iexp_qln2_inv_WIRE = _bank_ios_1_read_req_bits_iexp_qln2_inv_T; // @[Scratchpad.scala:682:94]
wire [31:0] _bank_ios_1_read_req_bits_scale_T; // @[Scratchpad.scala:683:78]
wire [31:0] _bank_ios_1_read_req_bits_scale_WIRE_1; // @[Scratchpad.scala:683:78]
assign _bank_ios_1_read_req_bits_scale_T = _bank_ios_1_read_req_bits_scale_WIRE_1; // @[Scratchpad.scala:683:78]
wire [31:0] _bank_ios_1_read_req_bits_scale_WIRE_bits = _bank_ios_1_read_req_bits_scale_T; // @[Scratchpad.scala:683:78]
wire _GEN_27 = ~io_acc_read_req_1_valid_0 & dmawrite_5 & bank_ios_1_1_read_req_ready & bank_ios_1_1_read_req_valid; // @[Decoupled.scala:51:35]
assign write_dispatch_q_q_io_deq_ready = _GEN_27 | _GEN_25 | _GEN_5 | _GEN_4 | _GEN_3 | _GEN_2 | _T_5 & _write_norm_q_io_enq_ready; // @[Decoupled.scala:51:35]
assign io_dma_write_resp_valid_0 = _GEN_27 | _GEN_25 | _GEN_5 | _GEN_4 | _GEN_3 | _GEN_2 | _T_22 & _write_dispatch_q_q_io_deq_bits_laddr_read_full_acc_row & (&_write_dispatch_q_q_io_deq_bits_laddr_data) & _write_dispatch_q_q_io_deq_bits_laddr_garbage_bit & write_dispatch_q_q_io_deq_ready & _write_dispatch_q_q_io_deq_valid; // @[Decoupled.scala:51:35, :362:21]
assign bank_ios_1_1_read_req_bits_fromDMA = ~io_acc_read_req_1_valid_0; // @[Scratchpad.scala:205:9, :637:29, :665:23, :674:37, :675:31]
assign bank_ios_1_1_read_req_bits_full = ~io_acc_read_req_1_valid_0 & _write_dispatch_q_q_io_deq_bits_laddr_read_full_acc_row; // @[Decoupled.scala:362:21]
assign bank_ios_1_1_read_req_bits_act = io_acc_read_req_1_valid_0 ? io_acc_read_req_1_bits_act_0 : _write_dispatch_q_q_io_deq_bits_acc_act; // @[Decoupled.scala:362:21]
assign bank_ios_1_1_read_req_bits_iexp_qln2_inv = io_acc_read_req_1_valid_0 ? 32'h0 : _bank_ios_1_read_req_bits_iexp_qln2_inv_WIRE; // @[Scratchpad.scala:205:9, :637:29, :665:23, :671:43, :675:31, :682:94]
assign bank_ios_1_1_read_req_bits_iexp_qln2 = io_acc_read_req_1_valid_0 ? 32'h0 : _bank_ios_1_read_req_bits_iexp_qln2_WIRE; // @[Scratchpad.scala:205:9, :637:29, :665:23, :670:39, :675:31, :681:86]
assign bank_ios_1_1_read_req_bits_igelu_qc = io_acc_read_req_1_valid_0 ? 32'h0 : _bank_ios_1_read_req_bits_igelu_qc_WIRE; // @[Scratchpad.scala:205:9, :637:29, :665:23, :669:38, :675:31, :680:84]
assign bank_ios_1_1_read_req_bits_igelu_qb = io_acc_read_req_1_valid_0 ? 32'h0 : _bank_ios_1_read_req_bits_igelu_qb_WIRE; // @[Scratchpad.scala:205:9, :637:29, :665:23, :668:38, :675:31, :679:84]
assign bank_ios_1_1_read_req_bits_addr = io_acc_read_req_1_valid_0 ? io_acc_read_req_1_bits_addr_0 : _bank_ios_1_read_req_bits_addr_T_1; // @[Scratchpad.scala:205:9, :637:29, :665:23, :666:34, :675:31]
assign bank_ios_1_1_read_req_bits_scale_bits = io_acc_read_req_1_valid_0 ? io_acc_read_req_1_bits_scale_bits_0 : _bank_ios_1_read_req_bits_scale_WIRE_bits; // @[Scratchpad.scala:205:9, :637:29, :665:23, :672:35, :675:31, :683:78]
assign bank_ios_1_1_read_resp_ready = _T_127 & bank_ios_1_1_read_resp_valid & _write_scale_q_io_enq_ready & _write_norm_q_io_deq_bits_laddr_is_acc_addr & ~(_T_131 & _write_norm_q_io_deq_bits_laddr_read_full_acc_row & (&_write_norm_q_io_deq_bits_laddr_data) & _write_norm_q_io_deq_bits_laddr_garbage_bit) & _write_norm_q_io_deq_bits_laddr_data[9]; // @[Scratchpad.scala:252:30, :253:31, :637:29, :697:41, :698:34, :699:31, :700:38, :701:54, :702:{11,56}]
wire _GEN_28 = _mvin_scale_pixel_repeater_io_resp_valid & _mvin_scale_pixel_repeater_io_resp_bits_tag_is_acc; // @[Scratchpad.scala:387:43, :727:71]
wire from_mvin_scale; // @[Scratchpad.scala:727:71]
assign from_mvin_scale = _GEN_28; // @[Scratchpad.scala:727:71]
wire from_mvin_scale_1; // @[Scratchpad.scala:727:71]
assign from_mvin_scale_1 = _GEN_28; // @[Scratchpad.scala:727:71]
wire _GEN_29 = _vsm_1_io_resp_valid & _vsm_1_io_resp_bits_tag_is_acc; // @[VectorScalarMultiplier.scala:200:21]
wire from_mvin_scale_acc; // @[Scratchpad.scala:728:60]
assign from_mvin_scale_acc = _GEN_29; // @[Scratchpad.scala:728:60]
wire from_mvin_scale_acc_1; // @[Scratchpad.scala:728:60]
assign from_mvin_scale_acc_1 = _GEN_29; // @[Scratchpad.scala:728:60]
wire _mvin_scale_acc_laddr_T_6; // @[Scratchpad.scala:732:77]
wire _mvin_scale_acc_laddr_T_5; // @[Scratchpad.scala:732:77]
wire mvin_scale_acc_laddr_is_acc_addr = _mvin_scale_acc_laddr_WIRE_is_acc_addr; // @[Scratchpad.scala:732:77]
wire _mvin_scale_acc_laddr_T_4; // @[Scratchpad.scala:732:77]
wire mvin_scale_acc_laddr_accumulate = _mvin_scale_acc_laddr_WIRE_accumulate; // @[Scratchpad.scala:732:77]
wire [2:0] _mvin_scale_acc_laddr_WIRE_3; // @[Scratchpad.scala:732:77]
wire mvin_scale_acc_laddr_read_full_acc_row = _mvin_scale_acc_laddr_WIRE_read_full_acc_row; // @[Scratchpad.scala:732:77]
wire [10:0] _mvin_scale_acc_laddr_T_2; // @[Scratchpad.scala:732:77]
wire [2:0] mvin_scale_acc_laddr_norm_cmd = _mvin_scale_acc_laddr_WIRE_norm_cmd; // @[Scratchpad.scala:732:77]
wire _mvin_scale_acc_laddr_T_1; // @[Scratchpad.scala:732:77]
wire [10:0] mvin_scale_acc_laddr_garbage = _mvin_scale_acc_laddr_WIRE_garbage; // @[Scratchpad.scala:732:77]
wire [13:0] _mvin_scale_acc_laddr_T; // @[Scratchpad.scala:732:77]
wire mvin_scale_acc_laddr_garbage_bit = _mvin_scale_acc_laddr_WIRE_garbage_bit; // @[Scratchpad.scala:732:77]
wire [31:0] _GEN_30 = {18'h0, _vsm_1_io_resp_bits_tag_addr}; // @[VectorScalarMultiplier.scala:200:21]
wire [31:0] _mvin_scale_acc_laddr_WIRE_1; // @[Scratchpad.scala:732:77]
assign _mvin_scale_acc_laddr_WIRE_1 = _GEN_30; // @[Scratchpad.scala:732:77]
wire [31:0] _mvin_scale_acc_laddr_WIRE_5; // @[Scratchpad.scala:732:77]
assign _mvin_scale_acc_laddr_WIRE_5 = _GEN_30; // @[Scratchpad.scala:732:77]
assign _mvin_scale_acc_laddr_T = _mvin_scale_acc_laddr_WIRE_1[13:0]; // @[Scratchpad.scala:732:77]
wire [13:0] _mvin_scale_acc_laddr_WIRE_data = _mvin_scale_acc_laddr_T; // @[Scratchpad.scala:732:77]
assign _mvin_scale_acc_laddr_T_1 = _mvin_scale_acc_laddr_WIRE_1[14]; // @[Scratchpad.scala:732:77]
assign _mvin_scale_acc_laddr_WIRE_garbage_bit = _mvin_scale_acc_laddr_T_1; // @[Scratchpad.scala:732:77]
assign _mvin_scale_acc_laddr_T_2 = _mvin_scale_acc_laddr_WIRE_1[25:15]; // @[Scratchpad.scala:732:77]
assign _mvin_scale_acc_laddr_WIRE_garbage = _mvin_scale_acc_laddr_T_2; // @[Scratchpad.scala:732:77]
wire [2:0] _mvin_scale_acc_laddr_T_3 = _mvin_scale_acc_laddr_WIRE_1[28:26]; // @[Scratchpad.scala:732:77]
wire [2:0] _mvin_scale_acc_laddr_WIRE_2 = _mvin_scale_acc_laddr_T_3; // @[Scratchpad.scala:732:77]
assign _mvin_scale_acc_laddr_WIRE_3 = _mvin_scale_acc_laddr_WIRE_2; // @[Scratchpad.scala:732:77]
assign _mvin_scale_acc_laddr_WIRE_norm_cmd = _mvin_scale_acc_laddr_WIRE_3; // @[Scratchpad.scala:732:77]
assign _mvin_scale_acc_laddr_T_4 = _mvin_scale_acc_laddr_WIRE_1[29]; // @[Scratchpad.scala:732:77]
assign _mvin_scale_acc_laddr_WIRE_read_full_acc_row = _mvin_scale_acc_laddr_T_4; // @[Scratchpad.scala:732:77]
assign _mvin_scale_acc_laddr_T_5 = _mvin_scale_acc_laddr_WIRE_1[30]; // @[Scratchpad.scala:732:77]
assign _mvin_scale_acc_laddr_WIRE_accumulate = _mvin_scale_acc_laddr_T_5; // @[Scratchpad.scala:732:77]
assign _mvin_scale_acc_laddr_T_6 = _mvin_scale_acc_laddr_WIRE_1[31]; // @[Scratchpad.scala:732:77]
assign _mvin_scale_acc_laddr_WIRE_is_acc_addr = _mvin_scale_acc_laddr_T_6; // @[Scratchpad.scala:732:77]
wire [13:0] mvin_scale_acc_laddr_data; // @[LocalAddr.scala:50:26]
wire [16:0] _GEN_31 = {1'h0, _vsm_1_io_resp_bits_row}; // @[VectorScalarMultiplier.scala:200:21]
wire [16:0] _mvin_scale_acc_laddr_result_data_T = {3'h0, _mvin_scale_acc_laddr_WIRE_data} + _GEN_31; // @[Scratchpad.scala:732:77]
wire [15:0] _mvin_scale_acc_laddr_result_data_T_1 = _mvin_scale_acc_laddr_result_data_T[15:0]; // @[LocalAddr.scala:51:25]
assign mvin_scale_acc_laddr_data = _mvin_scale_acc_laddr_result_data_T_1[13:0]; // @[LocalAddr.scala:50:26, :51:{17,25}]
wire _dmaread_bank_T = _mvin_scale_pixel_repeater_io_resp_bits_laddr_data[9]; // @[Scratchpad.scala:387:43]
wire _dmaread_bank_T_2 = _mvin_scale_pixel_repeater_io_resp_bits_laddr_data[9]; // @[Scratchpad.scala:387:43]
wire _dmaread_bank_T_1 = mvin_scale_acc_laddr_data[9]; // @[LocalAddr.scala:35:82, :50:26]
wire dmaread_bank = from_mvin_scale ? _dmaread_bank_T : _dmaread_bank_T_1; // @[Scratchpad.scala:727:71, :734:31]
wire [8:0] _dmaread_row_T = _mvin_scale_pixel_repeater_io_resp_bits_laddr_data[8:0]; // @[Scratchpad.scala:387:43]
wire [8:0] _dmaread_row_T_2 = _mvin_scale_pixel_repeater_io_resp_bits_laddr_data[8:0]; // @[Scratchpad.scala:387:43]
wire [8:0] _dmaread_row_T_1 = mvin_scale_acc_laddr_data[8:0]; // @[LocalAddr.scala:36:37, :50:26]
wire [8:0] dmaread_row = from_mvin_scale ? _dmaread_row_T : _dmaread_row_T_1; // @[Scratchpad.scala:727:71, :736:30]
wire _spad_last_T_1 = ~_mvin_scale_pixel_repeater_io_resp_bits_tag_is_acc; // @[Scratchpad.scala:387:43, :520:66, :740:115]
wire spad_last = _spad_last_T & _spad_last_T_1; // @[Scratchpad.scala:740:{65,112,115}]
wire _GEN_32 = from_mvin_scale | from_mvin_scale_acc; // @[Scratchpad.scala:727:71, :728:60, :742:40]
wire _dmaread_T_16; // @[Scratchpad.scala:742:40]
assign _dmaread_T_16 = _GEN_32; // @[Scratchpad.scala:742:40]
wire _bank_ios_0_write_bits_addr_T_1; // @[Scratchpad.scala:780:30]
assign _bank_ios_0_write_bits_addr_T_1 = _GEN_32; // @[Scratchpad.scala:742:40, :780:30]
wire _dmaread_T_17 = ~dmaread_bank; // @[Scratchpad.scala:734:31, :743:24]
wire dmaread_4 = _dmaread_T_16 & _dmaread_T_17; // @[Scratchpad.scala:742:{40,64}, :743:24]
wire _GEN_33 = _zero_writer_pixel_repeater_io_resp_valid & _zero_writer_pixel_repeater_io_resp_bits_laddr_is_acc_addr; // @[Scratchpad.scala:330:44, :750:66]
wire _zerowrite_T_36; // @[Scratchpad.scala:750:66]
assign _zerowrite_T_36 = _GEN_33; // @[Scratchpad.scala:750:66]
wire _zerowrite_T_44; // @[Scratchpad.scala:750:66]
assign _zerowrite_T_44 = _GEN_33; // @[Scratchpad.scala:750:66]
wire _zerowrite_T_37 = _zero_writer_pixel_repeater_io_resp_bits_laddr_data[9]; // @[Scratchpad.scala:330:44]
wire _zerowrite_T_45 = _zero_writer_pixel_repeater_io_resp_bits_laddr_data[9]; // @[Scratchpad.scala:330:44]
wire _zerowrite_T_38 = ~_zerowrite_T_37; // @[Scratchpad.scala:751:68]
wire _zerowrite_T_39 = _zerowrite_T_36 & _zerowrite_T_38; // @[Scratchpad.scala:750:{66,127}, :751:68]
wire _zerowrite_T_42 = _zerowrite_T_40 | _zerowrite_T_41; // @[Scratchpad.scala:753:{54,102,131}]
wire _zerowrite_T_43 = ~_zerowrite_T_42; // @[Scratchpad.scala:753:{11,102}]
wire zerowrite_4 = _zerowrite_T_39 & _zerowrite_T_43; // @[Scratchpad.scala:750:127, :751:76, :753:11]
wire _bank_ios_0_write_bits_acc_T = from_mvin_scale_acc ? _vsm_1_io_resp_bits_tag_accumulate : _zero_writer_pixel_repeater_io_resp_bits_laddr_accumulate; // @[Mux.scala:126:16]
wire _bank_ios_0_write_bits_acc_T_1 = from_mvin_scale ? _mvin_scale_pixel_repeater_io_resp_bits_tag_accumulate : _bank_ios_0_write_bits_acc_T; // @[Mux.scala:126:16]
assign _bank_ios_0_write_bits_acc_T_2 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_acc_0 : _bank_ios_0_write_bits_acc_T_1; // @[Mux.scala:126:16]
assign bank_ios_1_0_write_bits_acc = _bank_ios_0_write_bits_acc_T_2; // @[Mux.scala:126:16]
wire [8:0] _bank_ios_0_write_bits_addr_T = _zero_writer_pixel_repeater_io_resp_bits_laddr_data[8:0]; // @[Scratchpad.scala:330:44]
wire [8:0] _bank_ios_1_write_bits_addr_T = _zero_writer_pixel_repeater_io_resp_bits_laddr_data[8:0]; // @[Scratchpad.scala:330:44]
wire [8:0] _bank_ios_0_write_bits_addr_T_2 = _bank_ios_0_write_bits_addr_T_1 ? dmaread_row : _bank_ios_0_write_bits_addr_T; // @[Mux.scala:126:16]
assign _bank_ios_0_write_bits_addr_T_3 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_addr_0 : _bank_ios_0_write_bits_addr_T_2; // @[Mux.scala:126:16]
assign bank_ios_1_0_write_bits_addr = _bank_ios_0_write_bits_addr_T_3; // @[Mux.scala:126:16]
wire _T_149 = dmaread_4 & ~spad_last; // @[Scratchpad.scala:740:112, :742:64, :786:{29,32}]
wire sign = _mvin_scale_pixel_repeater_io_resp_bits_out_0[7]; // @[Scratchpad.scala:387:43]
wire sign_16 = _mvin_scale_pixel_repeater_io_resp_bits_out_0[7]; // @[Scratchpad.scala:387:43]
wire [1:0] _GEN_34 = {2{sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] lo_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign lo_lo_lo_hi = _GEN_34; // @[Arithmetic.scala:118:18]
wire [1:0] lo_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign lo_lo_hi_hi = _GEN_34; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign lo_hi_lo_hi = _GEN_34; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign lo_hi_hi_hi = _GEN_34; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign hi_lo_lo_hi = _GEN_34; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign hi_lo_hi_hi = _GEN_34; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign hi_hi_lo_hi = _GEN_34; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign hi_hi_hi_hi = _GEN_34; // @[Arithmetic.scala:118:18]
wire [2:0] lo_lo_lo = {lo_lo_lo_hi, sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_lo_hi = {lo_lo_hi_hi, sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_lo = {lo_lo_hi, lo_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] lo_hi_lo = {lo_hi_lo_hi, sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_hi_hi = {lo_hi_hi_hi, sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_hi = {lo_hi_hi, lo_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] lo = {lo_hi, lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_lo_lo = {hi_lo_lo_hi, sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_lo_hi = {hi_lo_hi_hi, sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_lo = {hi_lo_hi, hi_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_hi_lo = {hi_hi_lo_hi, sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_hi_hi = {hi_hi_hi_hi, sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_hi = {hi_hi_hi, hi_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] hi = {hi_hi, hi_lo}; // @[Arithmetic.scala:118:18]
wire [7:0] lo_1; // @[Arithmetic.scala:118:14]
wire sign_1 = _mvin_scale_pixel_repeater_io_resp_bits_out_1[7]; // @[Scratchpad.scala:387:43]
wire sign_17 = _mvin_scale_pixel_repeater_io_resp_bits_out_1[7]; // @[Scratchpad.scala:387:43]
wire [1:0] _GEN_35 = {2{sign_1}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] lo_lo_lo_hi_1; // @[Arithmetic.scala:118:18]
assign lo_lo_lo_hi_1 = _GEN_35; // @[Arithmetic.scala:118:18]
wire [1:0] lo_lo_hi_hi_1; // @[Arithmetic.scala:118:18]
assign lo_lo_hi_hi_1 = _GEN_35; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_lo_hi_1; // @[Arithmetic.scala:118:18]
assign lo_hi_lo_hi_1 = _GEN_35; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_hi_hi_1; // @[Arithmetic.scala:118:18]
assign lo_hi_hi_hi_1 = _GEN_35; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_lo_hi_1; // @[Arithmetic.scala:118:18]
assign hi_lo_lo_hi_1 = _GEN_35; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_hi_hi_1; // @[Arithmetic.scala:118:18]
assign hi_lo_hi_hi_1 = _GEN_35; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_lo_hi_1; // @[Arithmetic.scala:118:18]
assign hi_hi_lo_hi_1 = _GEN_35; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_hi_hi_1; // @[Arithmetic.scala:118:18]
assign hi_hi_hi_hi_1 = _GEN_35; // @[Arithmetic.scala:118:18]
wire [2:0] lo_lo_lo_1 = {lo_lo_lo_hi_1, sign_1}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_lo_hi_1 = {lo_lo_hi_hi_1, sign_1}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_lo_1 = {lo_lo_hi_1, lo_lo_lo_1}; // @[Arithmetic.scala:118:18]
wire [2:0] lo_hi_lo_1 = {lo_hi_lo_hi_1, sign_1}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_hi_hi_1 = {lo_hi_hi_hi_1, sign_1}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_hi_1 = {lo_hi_hi_1, lo_hi_lo_1}; // @[Arithmetic.scala:118:18]
wire [11:0] lo_2 = {lo_hi_1, lo_lo_1}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_lo_lo_1 = {hi_lo_lo_hi_1, sign_1}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_lo_hi_1 = {hi_lo_hi_hi_1, sign_1}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_lo_1 = {hi_lo_hi_1, hi_lo_lo_1}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_hi_lo_1 = {hi_hi_lo_hi_1, sign_1}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_hi_hi_1 = {hi_hi_hi_hi_1, sign_1}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_hi_1 = {hi_hi_hi_1, hi_hi_lo_1}; // @[Arithmetic.scala:118:18]
wire [11:0] hi_1 = {hi_hi_1, hi_lo_1}; // @[Arithmetic.scala:118:18]
wire [7:0] lo_3; // @[Arithmetic.scala:118:14]
wire sign_2 = _mvin_scale_pixel_repeater_io_resp_bits_out_2[7]; // @[Scratchpad.scala:387:43]
wire sign_18 = _mvin_scale_pixel_repeater_io_resp_bits_out_2[7]; // @[Scratchpad.scala:387:43]
wire [1:0] _GEN_36 = {2{sign_2}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] lo_lo_lo_hi_2; // @[Arithmetic.scala:118:18]
assign lo_lo_lo_hi_2 = _GEN_36; // @[Arithmetic.scala:118:18]
wire [1:0] lo_lo_hi_hi_2; // @[Arithmetic.scala:118:18]
assign lo_lo_hi_hi_2 = _GEN_36; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_lo_hi_2; // @[Arithmetic.scala:118:18]
assign lo_hi_lo_hi_2 = _GEN_36; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_hi_hi_2; // @[Arithmetic.scala:118:18]
assign lo_hi_hi_hi_2 = _GEN_36; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_lo_hi_2; // @[Arithmetic.scala:118:18]
assign hi_lo_lo_hi_2 = _GEN_36; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_hi_hi_2; // @[Arithmetic.scala:118:18]
assign hi_lo_hi_hi_2 = _GEN_36; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_lo_hi_2; // @[Arithmetic.scala:118:18]
assign hi_hi_lo_hi_2 = _GEN_36; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_hi_hi_2; // @[Arithmetic.scala:118:18]
assign hi_hi_hi_hi_2 = _GEN_36; // @[Arithmetic.scala:118:18]
wire [2:0] lo_lo_lo_2 = {lo_lo_lo_hi_2, sign_2}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_lo_hi_2 = {lo_lo_hi_hi_2, sign_2}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_lo_2 = {lo_lo_hi_2, lo_lo_lo_2}; // @[Arithmetic.scala:118:18]
wire [2:0] lo_hi_lo_2 = {lo_hi_lo_hi_2, sign_2}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_hi_hi_2 = {lo_hi_hi_hi_2, sign_2}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_hi_2 = {lo_hi_hi_2, lo_hi_lo_2}; // @[Arithmetic.scala:118:18]
wire [11:0] lo_4 = {lo_hi_2, lo_lo_2}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_lo_lo_2 = {hi_lo_lo_hi_2, sign_2}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_lo_hi_2 = {hi_lo_hi_hi_2, sign_2}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_lo_2 = {hi_lo_hi_2, hi_lo_lo_2}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_hi_lo_2 = {hi_hi_lo_hi_2, sign_2}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_hi_hi_2 = {hi_hi_hi_hi_2, sign_2}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_hi_2 = {hi_hi_hi_2, hi_hi_lo_2}; // @[Arithmetic.scala:118:18]
wire [11:0] hi_2 = {hi_hi_2, hi_lo_2}; // @[Arithmetic.scala:118:18]
wire [7:0] lo_5; // @[Arithmetic.scala:118:14]
wire sign_3 = _mvin_scale_pixel_repeater_io_resp_bits_out_3[7]; // @[Scratchpad.scala:387:43]
wire sign_19 = _mvin_scale_pixel_repeater_io_resp_bits_out_3[7]; // @[Scratchpad.scala:387:43]
wire [1:0] _GEN_37 = {2{sign_3}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] lo_lo_lo_hi_3; // @[Arithmetic.scala:118:18]
assign lo_lo_lo_hi_3 = _GEN_37; // @[Arithmetic.scala:118:18]
wire [1:0] lo_lo_hi_hi_3; // @[Arithmetic.scala:118:18]
assign lo_lo_hi_hi_3 = _GEN_37; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_lo_hi_3; // @[Arithmetic.scala:118:18]
assign lo_hi_lo_hi_3 = _GEN_37; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_hi_hi_3; // @[Arithmetic.scala:118:18]
assign lo_hi_hi_hi_3 = _GEN_37; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_lo_hi_3; // @[Arithmetic.scala:118:18]
assign hi_lo_lo_hi_3 = _GEN_37; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_hi_hi_3; // @[Arithmetic.scala:118:18]
assign hi_lo_hi_hi_3 = _GEN_37; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_lo_hi_3; // @[Arithmetic.scala:118:18]
assign hi_hi_lo_hi_3 = _GEN_37; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_hi_hi_3; // @[Arithmetic.scala:118:18]
assign hi_hi_hi_hi_3 = _GEN_37; // @[Arithmetic.scala:118:18]
wire [2:0] lo_lo_lo_3 = {lo_lo_lo_hi_3, sign_3}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_lo_hi_3 = {lo_lo_hi_hi_3, sign_3}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_lo_3 = {lo_lo_hi_3, lo_lo_lo_3}; // @[Arithmetic.scala:118:18]
wire [2:0] lo_hi_lo_3 = {lo_hi_lo_hi_3, sign_3}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_hi_hi_3 = {lo_hi_hi_hi_3, sign_3}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_hi_3 = {lo_hi_hi_3, lo_hi_lo_3}; // @[Arithmetic.scala:118:18]
wire [11:0] lo_6 = {lo_hi_3, lo_lo_3}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_lo_lo_3 = {hi_lo_lo_hi_3, sign_3}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_lo_hi_3 = {hi_lo_hi_hi_3, sign_3}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_lo_3 = {hi_lo_hi_3, hi_lo_lo_3}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_hi_lo_3 = {hi_hi_lo_hi_3, sign_3}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_hi_hi_3 = {hi_hi_hi_hi_3, sign_3}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_hi_3 = {hi_hi_hi_3, hi_hi_lo_3}; // @[Arithmetic.scala:118:18]
wire [11:0] hi_3 = {hi_hi_3, hi_lo_3}; // @[Arithmetic.scala:118:18]
wire [7:0] lo_7; // @[Arithmetic.scala:118:14]
wire sign_4 = _mvin_scale_pixel_repeater_io_resp_bits_out_4[7]; // @[Scratchpad.scala:387:43]
wire sign_20 = _mvin_scale_pixel_repeater_io_resp_bits_out_4[7]; // @[Scratchpad.scala:387:43]
wire [1:0] _GEN_38 = {2{sign_4}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] lo_lo_lo_hi_4; // @[Arithmetic.scala:118:18]
assign lo_lo_lo_hi_4 = _GEN_38; // @[Arithmetic.scala:118:18]
wire [1:0] lo_lo_hi_hi_4; // @[Arithmetic.scala:118:18]
assign lo_lo_hi_hi_4 = _GEN_38; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_lo_hi_4; // @[Arithmetic.scala:118:18]
assign lo_hi_lo_hi_4 = _GEN_38; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_hi_hi_4; // @[Arithmetic.scala:118:18]
assign lo_hi_hi_hi_4 = _GEN_38; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_lo_hi_4; // @[Arithmetic.scala:118:18]
assign hi_lo_lo_hi_4 = _GEN_38; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_hi_hi_4; // @[Arithmetic.scala:118:18]
assign hi_lo_hi_hi_4 = _GEN_38; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_lo_hi_4; // @[Arithmetic.scala:118:18]
assign hi_hi_lo_hi_4 = _GEN_38; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_hi_hi_4; // @[Arithmetic.scala:118:18]
assign hi_hi_hi_hi_4 = _GEN_38; // @[Arithmetic.scala:118:18]
wire [2:0] lo_lo_lo_4 = {lo_lo_lo_hi_4, sign_4}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_lo_hi_4 = {lo_lo_hi_hi_4, sign_4}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_lo_4 = {lo_lo_hi_4, lo_lo_lo_4}; // @[Arithmetic.scala:118:18]
wire [2:0] lo_hi_lo_4 = {lo_hi_lo_hi_4, sign_4}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_hi_hi_4 = {lo_hi_hi_hi_4, sign_4}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_hi_4 = {lo_hi_hi_4, lo_hi_lo_4}; // @[Arithmetic.scala:118:18]
wire [11:0] lo_8 = {lo_hi_4, lo_lo_4}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_lo_lo_4 = {hi_lo_lo_hi_4, sign_4}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_lo_hi_4 = {hi_lo_hi_hi_4, sign_4}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_lo_4 = {hi_lo_hi_4, hi_lo_lo_4}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_hi_lo_4 = {hi_hi_lo_hi_4, sign_4}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_hi_hi_4 = {hi_hi_hi_hi_4, sign_4}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_hi_4 = {hi_hi_hi_4, hi_hi_lo_4}; // @[Arithmetic.scala:118:18]
wire [11:0] hi_4 = {hi_hi_4, hi_lo_4}; // @[Arithmetic.scala:118:18]
wire [7:0] lo_9; // @[Arithmetic.scala:118:14]
wire sign_5 = _mvin_scale_pixel_repeater_io_resp_bits_out_5[7]; // @[Scratchpad.scala:387:43]
wire sign_21 = _mvin_scale_pixel_repeater_io_resp_bits_out_5[7]; // @[Scratchpad.scala:387:43]
wire [1:0] _GEN_39 = {2{sign_5}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] lo_lo_lo_hi_5; // @[Arithmetic.scala:118:18]
assign lo_lo_lo_hi_5 = _GEN_39; // @[Arithmetic.scala:118:18]
wire [1:0] lo_lo_hi_hi_5; // @[Arithmetic.scala:118:18]
assign lo_lo_hi_hi_5 = _GEN_39; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_lo_hi_5; // @[Arithmetic.scala:118:18]
assign lo_hi_lo_hi_5 = _GEN_39; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_hi_hi_5; // @[Arithmetic.scala:118:18]
assign lo_hi_hi_hi_5 = _GEN_39; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_lo_hi_5; // @[Arithmetic.scala:118:18]
assign hi_lo_lo_hi_5 = _GEN_39; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_hi_hi_5; // @[Arithmetic.scala:118:18]
assign hi_lo_hi_hi_5 = _GEN_39; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_lo_hi_5; // @[Arithmetic.scala:118:18]
assign hi_hi_lo_hi_5 = _GEN_39; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_hi_hi_5; // @[Arithmetic.scala:118:18]
assign hi_hi_hi_hi_5 = _GEN_39; // @[Arithmetic.scala:118:18]
wire [2:0] lo_lo_lo_5 = {lo_lo_lo_hi_5, sign_5}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_lo_hi_5 = {lo_lo_hi_hi_5, sign_5}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_lo_5 = {lo_lo_hi_5, lo_lo_lo_5}; // @[Arithmetic.scala:118:18]
wire [2:0] lo_hi_lo_5 = {lo_hi_lo_hi_5, sign_5}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_hi_hi_5 = {lo_hi_hi_hi_5, sign_5}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_hi_5 = {lo_hi_hi_5, lo_hi_lo_5}; // @[Arithmetic.scala:118:18]
wire [11:0] lo_10 = {lo_hi_5, lo_lo_5}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_lo_lo_5 = {hi_lo_lo_hi_5, sign_5}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_lo_hi_5 = {hi_lo_hi_hi_5, sign_5}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_lo_5 = {hi_lo_hi_5, hi_lo_lo_5}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_hi_lo_5 = {hi_hi_lo_hi_5, sign_5}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_hi_hi_5 = {hi_hi_hi_hi_5, sign_5}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_hi_5 = {hi_hi_hi_5, hi_hi_lo_5}; // @[Arithmetic.scala:118:18]
wire [11:0] hi_5 = {hi_hi_5, hi_lo_5}; // @[Arithmetic.scala:118:18]
wire [7:0] lo_11; // @[Arithmetic.scala:118:14]
wire sign_6 = _mvin_scale_pixel_repeater_io_resp_bits_out_6[7]; // @[Scratchpad.scala:387:43]
wire sign_22 = _mvin_scale_pixel_repeater_io_resp_bits_out_6[7]; // @[Scratchpad.scala:387:43]
wire [1:0] _GEN_40 = {2{sign_6}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] lo_lo_lo_hi_6; // @[Arithmetic.scala:118:18]
assign lo_lo_lo_hi_6 = _GEN_40; // @[Arithmetic.scala:118:18]
wire [1:0] lo_lo_hi_hi_6; // @[Arithmetic.scala:118:18]
assign lo_lo_hi_hi_6 = _GEN_40; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_lo_hi_6; // @[Arithmetic.scala:118:18]
assign lo_hi_lo_hi_6 = _GEN_40; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_hi_hi_6; // @[Arithmetic.scala:118:18]
assign lo_hi_hi_hi_6 = _GEN_40; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_lo_hi_6; // @[Arithmetic.scala:118:18]
assign hi_lo_lo_hi_6 = _GEN_40; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_hi_hi_6; // @[Arithmetic.scala:118:18]
assign hi_lo_hi_hi_6 = _GEN_40; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_lo_hi_6; // @[Arithmetic.scala:118:18]
assign hi_hi_lo_hi_6 = _GEN_40; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_hi_hi_6; // @[Arithmetic.scala:118:18]
assign hi_hi_hi_hi_6 = _GEN_40; // @[Arithmetic.scala:118:18]
wire [2:0] lo_lo_lo_6 = {lo_lo_lo_hi_6, sign_6}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_lo_hi_6 = {lo_lo_hi_hi_6, sign_6}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_lo_6 = {lo_lo_hi_6, lo_lo_lo_6}; // @[Arithmetic.scala:118:18]
wire [2:0] lo_hi_lo_6 = {lo_hi_lo_hi_6, sign_6}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_hi_hi_6 = {lo_hi_hi_hi_6, sign_6}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_hi_6 = {lo_hi_hi_6, lo_hi_lo_6}; // @[Arithmetic.scala:118:18]
wire [11:0] lo_12 = {lo_hi_6, lo_lo_6}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_lo_lo_6 = {hi_lo_lo_hi_6, sign_6}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_lo_hi_6 = {hi_lo_hi_hi_6, sign_6}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_lo_6 = {hi_lo_hi_6, hi_lo_lo_6}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_hi_lo_6 = {hi_hi_lo_hi_6, sign_6}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_hi_hi_6 = {hi_hi_hi_hi_6, sign_6}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_hi_6 = {hi_hi_hi_6, hi_hi_lo_6}; // @[Arithmetic.scala:118:18]
wire [11:0] hi_6 = {hi_hi_6, hi_lo_6}; // @[Arithmetic.scala:118:18]
wire [7:0] lo_13; // @[Arithmetic.scala:118:14]
wire sign_7 = _mvin_scale_pixel_repeater_io_resp_bits_out_7[7]; // @[Scratchpad.scala:387:43]
wire sign_23 = _mvin_scale_pixel_repeater_io_resp_bits_out_7[7]; // @[Scratchpad.scala:387:43]
wire [1:0] _GEN_41 = {2{sign_7}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] lo_lo_lo_hi_7; // @[Arithmetic.scala:118:18]
assign lo_lo_lo_hi_7 = _GEN_41; // @[Arithmetic.scala:118:18]
wire [1:0] lo_lo_hi_hi_7; // @[Arithmetic.scala:118:18]
assign lo_lo_hi_hi_7 = _GEN_41; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_lo_hi_7; // @[Arithmetic.scala:118:18]
assign lo_hi_lo_hi_7 = _GEN_41; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_hi_hi_7; // @[Arithmetic.scala:118:18]
assign lo_hi_hi_hi_7 = _GEN_41; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_lo_hi_7; // @[Arithmetic.scala:118:18]
assign hi_lo_lo_hi_7 = _GEN_41; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_hi_hi_7; // @[Arithmetic.scala:118:18]
assign hi_lo_hi_hi_7 = _GEN_41; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_lo_hi_7; // @[Arithmetic.scala:118:18]
assign hi_hi_lo_hi_7 = _GEN_41; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_hi_hi_7; // @[Arithmetic.scala:118:18]
assign hi_hi_hi_hi_7 = _GEN_41; // @[Arithmetic.scala:118:18]
wire [2:0] lo_lo_lo_7 = {lo_lo_lo_hi_7, sign_7}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_lo_hi_7 = {lo_lo_hi_hi_7, sign_7}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_lo_7 = {lo_lo_hi_7, lo_lo_lo_7}; // @[Arithmetic.scala:118:18]
wire [2:0] lo_hi_lo_7 = {lo_hi_lo_hi_7, sign_7}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_hi_hi_7 = {lo_hi_hi_hi_7, sign_7}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_hi_7 = {lo_hi_hi_7, lo_hi_lo_7}; // @[Arithmetic.scala:118:18]
wire [11:0] lo_14 = {lo_hi_7, lo_lo_7}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_lo_lo_7 = {hi_lo_lo_hi_7, sign_7}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_lo_hi_7 = {hi_lo_hi_hi_7, sign_7}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_lo_7 = {hi_lo_hi_7, hi_lo_lo_7}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_hi_lo_7 = {hi_hi_lo_hi_7, sign_7}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_hi_hi_7 = {hi_hi_hi_hi_7, sign_7}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_hi_7 = {hi_hi_hi_7, hi_hi_lo_7}; // @[Arithmetic.scala:118:18]
wire [11:0] hi_7 = {hi_hi_7, hi_lo_7}; // @[Arithmetic.scala:118:18]
wire [7:0] lo_15; // @[Arithmetic.scala:118:14]
wire sign_8 = _mvin_scale_pixel_repeater_io_resp_bits_out_8[7]; // @[Scratchpad.scala:387:43]
wire sign_24 = _mvin_scale_pixel_repeater_io_resp_bits_out_8[7]; // @[Scratchpad.scala:387:43]
wire [1:0] _GEN_42 = {2{sign_8}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] lo_lo_lo_hi_8; // @[Arithmetic.scala:118:18]
assign lo_lo_lo_hi_8 = _GEN_42; // @[Arithmetic.scala:118:18]
wire [1:0] lo_lo_hi_hi_8; // @[Arithmetic.scala:118:18]
assign lo_lo_hi_hi_8 = _GEN_42; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_lo_hi_8; // @[Arithmetic.scala:118:18]
assign lo_hi_lo_hi_8 = _GEN_42; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_hi_hi_8; // @[Arithmetic.scala:118:18]
assign lo_hi_hi_hi_8 = _GEN_42; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_lo_hi_8; // @[Arithmetic.scala:118:18]
assign hi_lo_lo_hi_8 = _GEN_42; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_hi_hi_8; // @[Arithmetic.scala:118:18]
assign hi_lo_hi_hi_8 = _GEN_42; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_lo_hi_8; // @[Arithmetic.scala:118:18]
assign hi_hi_lo_hi_8 = _GEN_42; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_hi_hi_8; // @[Arithmetic.scala:118:18]
assign hi_hi_hi_hi_8 = _GEN_42; // @[Arithmetic.scala:118:18]
wire [2:0] lo_lo_lo_8 = {lo_lo_lo_hi_8, sign_8}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_lo_hi_8 = {lo_lo_hi_hi_8, sign_8}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_lo_8 = {lo_lo_hi_8, lo_lo_lo_8}; // @[Arithmetic.scala:118:18]
wire [2:0] lo_hi_lo_8 = {lo_hi_lo_hi_8, sign_8}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_hi_hi_8 = {lo_hi_hi_hi_8, sign_8}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_hi_8 = {lo_hi_hi_8, lo_hi_lo_8}; // @[Arithmetic.scala:118:18]
wire [11:0] lo_16 = {lo_hi_8, lo_lo_8}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_lo_lo_8 = {hi_lo_lo_hi_8, sign_8}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_lo_hi_8 = {hi_lo_hi_hi_8, sign_8}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_lo_8 = {hi_lo_hi_8, hi_lo_lo_8}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_hi_lo_8 = {hi_hi_lo_hi_8, sign_8}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_hi_hi_8 = {hi_hi_hi_hi_8, sign_8}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_hi_8 = {hi_hi_hi_8, hi_hi_lo_8}; // @[Arithmetic.scala:118:18]
wire [11:0] hi_8 = {hi_hi_8, hi_lo_8}; // @[Arithmetic.scala:118:18]
wire [7:0] lo_17; // @[Arithmetic.scala:118:14]
wire sign_9 = _mvin_scale_pixel_repeater_io_resp_bits_out_9[7]; // @[Scratchpad.scala:387:43]
wire sign_25 = _mvin_scale_pixel_repeater_io_resp_bits_out_9[7]; // @[Scratchpad.scala:387:43]
wire [1:0] _GEN_43 = {2{sign_9}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] lo_lo_lo_hi_9; // @[Arithmetic.scala:118:18]
assign lo_lo_lo_hi_9 = _GEN_43; // @[Arithmetic.scala:118:18]
wire [1:0] lo_lo_hi_hi_9; // @[Arithmetic.scala:118:18]
assign lo_lo_hi_hi_9 = _GEN_43; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_lo_hi_9; // @[Arithmetic.scala:118:18]
assign lo_hi_lo_hi_9 = _GEN_43; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_hi_hi_9; // @[Arithmetic.scala:118:18]
assign lo_hi_hi_hi_9 = _GEN_43; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_lo_hi_9; // @[Arithmetic.scala:118:18]
assign hi_lo_lo_hi_9 = _GEN_43; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_hi_hi_9; // @[Arithmetic.scala:118:18]
assign hi_lo_hi_hi_9 = _GEN_43; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_lo_hi_9; // @[Arithmetic.scala:118:18]
assign hi_hi_lo_hi_9 = _GEN_43; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_hi_hi_9; // @[Arithmetic.scala:118:18]
assign hi_hi_hi_hi_9 = _GEN_43; // @[Arithmetic.scala:118:18]
wire [2:0] lo_lo_lo_9 = {lo_lo_lo_hi_9, sign_9}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_lo_hi_9 = {lo_lo_hi_hi_9, sign_9}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_lo_9 = {lo_lo_hi_9, lo_lo_lo_9}; // @[Arithmetic.scala:118:18]
wire [2:0] lo_hi_lo_9 = {lo_hi_lo_hi_9, sign_9}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_hi_hi_9 = {lo_hi_hi_hi_9, sign_9}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_hi_9 = {lo_hi_hi_9, lo_hi_lo_9}; // @[Arithmetic.scala:118:18]
wire [11:0] lo_18 = {lo_hi_9, lo_lo_9}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_lo_lo_9 = {hi_lo_lo_hi_9, sign_9}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_lo_hi_9 = {hi_lo_hi_hi_9, sign_9}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_lo_9 = {hi_lo_hi_9, hi_lo_lo_9}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_hi_lo_9 = {hi_hi_lo_hi_9, sign_9}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_hi_hi_9 = {hi_hi_hi_hi_9, sign_9}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_hi_9 = {hi_hi_hi_9, hi_hi_lo_9}; // @[Arithmetic.scala:118:18]
wire [11:0] hi_9 = {hi_hi_9, hi_lo_9}; // @[Arithmetic.scala:118:18]
wire [7:0] lo_19; // @[Arithmetic.scala:118:14]
wire sign_10 = _mvin_scale_pixel_repeater_io_resp_bits_out_10[7]; // @[Scratchpad.scala:387:43]
wire sign_26 = _mvin_scale_pixel_repeater_io_resp_bits_out_10[7]; // @[Scratchpad.scala:387:43]
wire [1:0] _GEN_44 = {2{sign_10}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] lo_lo_lo_hi_10; // @[Arithmetic.scala:118:18]
assign lo_lo_lo_hi_10 = _GEN_44; // @[Arithmetic.scala:118:18]
wire [1:0] lo_lo_hi_hi_10; // @[Arithmetic.scala:118:18]
assign lo_lo_hi_hi_10 = _GEN_44; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_lo_hi_10; // @[Arithmetic.scala:118:18]
assign lo_hi_lo_hi_10 = _GEN_44; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_hi_hi_10; // @[Arithmetic.scala:118:18]
assign lo_hi_hi_hi_10 = _GEN_44; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_lo_hi_10; // @[Arithmetic.scala:118:18]
assign hi_lo_lo_hi_10 = _GEN_44; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_hi_hi_10; // @[Arithmetic.scala:118:18]
assign hi_lo_hi_hi_10 = _GEN_44; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_lo_hi_10; // @[Arithmetic.scala:118:18]
assign hi_hi_lo_hi_10 = _GEN_44; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_hi_hi_10; // @[Arithmetic.scala:118:18]
assign hi_hi_hi_hi_10 = _GEN_44; // @[Arithmetic.scala:118:18]
wire [2:0] lo_lo_lo_10 = {lo_lo_lo_hi_10, sign_10}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_lo_hi_10 = {lo_lo_hi_hi_10, sign_10}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_lo_10 = {lo_lo_hi_10, lo_lo_lo_10}; // @[Arithmetic.scala:118:18]
wire [2:0] lo_hi_lo_10 = {lo_hi_lo_hi_10, sign_10}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_hi_hi_10 = {lo_hi_hi_hi_10, sign_10}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_hi_10 = {lo_hi_hi_10, lo_hi_lo_10}; // @[Arithmetic.scala:118:18]
wire [11:0] lo_20 = {lo_hi_10, lo_lo_10}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_lo_lo_10 = {hi_lo_lo_hi_10, sign_10}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_lo_hi_10 = {hi_lo_hi_hi_10, sign_10}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_lo_10 = {hi_lo_hi_10, hi_lo_lo_10}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_hi_lo_10 = {hi_hi_lo_hi_10, sign_10}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_hi_hi_10 = {hi_hi_hi_hi_10, sign_10}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_hi_10 = {hi_hi_hi_10, hi_hi_lo_10}; // @[Arithmetic.scala:118:18]
wire [11:0] hi_10 = {hi_hi_10, hi_lo_10}; // @[Arithmetic.scala:118:18]
wire [7:0] lo_21; // @[Arithmetic.scala:118:14]
wire sign_11 = _mvin_scale_pixel_repeater_io_resp_bits_out_11[7]; // @[Scratchpad.scala:387:43]
wire sign_27 = _mvin_scale_pixel_repeater_io_resp_bits_out_11[7]; // @[Scratchpad.scala:387:43]
wire [1:0] _GEN_45 = {2{sign_11}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] lo_lo_lo_hi_11; // @[Arithmetic.scala:118:18]
assign lo_lo_lo_hi_11 = _GEN_45; // @[Arithmetic.scala:118:18]
wire [1:0] lo_lo_hi_hi_11; // @[Arithmetic.scala:118:18]
assign lo_lo_hi_hi_11 = _GEN_45; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_lo_hi_11; // @[Arithmetic.scala:118:18]
assign lo_hi_lo_hi_11 = _GEN_45; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_hi_hi_11; // @[Arithmetic.scala:118:18]
assign lo_hi_hi_hi_11 = _GEN_45; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_lo_hi_11; // @[Arithmetic.scala:118:18]
assign hi_lo_lo_hi_11 = _GEN_45; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_hi_hi_11; // @[Arithmetic.scala:118:18]
assign hi_lo_hi_hi_11 = _GEN_45; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_lo_hi_11; // @[Arithmetic.scala:118:18]
assign hi_hi_lo_hi_11 = _GEN_45; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_hi_hi_11; // @[Arithmetic.scala:118:18]
assign hi_hi_hi_hi_11 = _GEN_45; // @[Arithmetic.scala:118:18]
wire [2:0] lo_lo_lo_11 = {lo_lo_lo_hi_11, sign_11}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_lo_hi_11 = {lo_lo_hi_hi_11, sign_11}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_lo_11 = {lo_lo_hi_11, lo_lo_lo_11}; // @[Arithmetic.scala:118:18]
wire [2:0] lo_hi_lo_11 = {lo_hi_lo_hi_11, sign_11}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_hi_hi_11 = {lo_hi_hi_hi_11, sign_11}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_hi_11 = {lo_hi_hi_11, lo_hi_lo_11}; // @[Arithmetic.scala:118:18]
wire [11:0] lo_22 = {lo_hi_11, lo_lo_11}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_lo_lo_11 = {hi_lo_lo_hi_11, sign_11}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_lo_hi_11 = {hi_lo_hi_hi_11, sign_11}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_lo_11 = {hi_lo_hi_11, hi_lo_lo_11}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_hi_lo_11 = {hi_hi_lo_hi_11, sign_11}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_hi_hi_11 = {hi_hi_hi_hi_11, sign_11}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_hi_11 = {hi_hi_hi_11, hi_hi_lo_11}; // @[Arithmetic.scala:118:18]
wire [11:0] hi_11 = {hi_hi_11, hi_lo_11}; // @[Arithmetic.scala:118:18]
wire [7:0] lo_23; // @[Arithmetic.scala:118:14]
wire sign_12 = _mvin_scale_pixel_repeater_io_resp_bits_out_12[7]; // @[Scratchpad.scala:387:43]
wire sign_28 = _mvin_scale_pixel_repeater_io_resp_bits_out_12[7]; // @[Scratchpad.scala:387:43]
wire [1:0] _GEN_46 = {2{sign_12}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] lo_lo_lo_hi_12; // @[Arithmetic.scala:118:18]
assign lo_lo_lo_hi_12 = _GEN_46; // @[Arithmetic.scala:118:18]
wire [1:0] lo_lo_hi_hi_12; // @[Arithmetic.scala:118:18]
assign lo_lo_hi_hi_12 = _GEN_46; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_lo_hi_12; // @[Arithmetic.scala:118:18]
assign lo_hi_lo_hi_12 = _GEN_46; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_hi_hi_12; // @[Arithmetic.scala:118:18]
assign lo_hi_hi_hi_12 = _GEN_46; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_lo_hi_12; // @[Arithmetic.scala:118:18]
assign hi_lo_lo_hi_12 = _GEN_46; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_hi_hi_12; // @[Arithmetic.scala:118:18]
assign hi_lo_hi_hi_12 = _GEN_46; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_lo_hi_12; // @[Arithmetic.scala:118:18]
assign hi_hi_lo_hi_12 = _GEN_46; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_hi_hi_12; // @[Arithmetic.scala:118:18]
assign hi_hi_hi_hi_12 = _GEN_46; // @[Arithmetic.scala:118:18]
wire [2:0] lo_lo_lo_12 = {lo_lo_lo_hi_12, sign_12}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_lo_hi_12 = {lo_lo_hi_hi_12, sign_12}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_lo_12 = {lo_lo_hi_12, lo_lo_lo_12}; // @[Arithmetic.scala:118:18]
wire [2:0] lo_hi_lo_12 = {lo_hi_lo_hi_12, sign_12}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_hi_hi_12 = {lo_hi_hi_hi_12, sign_12}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_hi_12 = {lo_hi_hi_12, lo_hi_lo_12}; // @[Arithmetic.scala:118:18]
wire [11:0] lo_24 = {lo_hi_12, lo_lo_12}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_lo_lo_12 = {hi_lo_lo_hi_12, sign_12}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_lo_hi_12 = {hi_lo_hi_hi_12, sign_12}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_lo_12 = {hi_lo_hi_12, hi_lo_lo_12}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_hi_lo_12 = {hi_hi_lo_hi_12, sign_12}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_hi_hi_12 = {hi_hi_hi_hi_12, sign_12}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_hi_12 = {hi_hi_hi_12, hi_hi_lo_12}; // @[Arithmetic.scala:118:18]
wire [11:0] hi_12 = {hi_hi_12, hi_lo_12}; // @[Arithmetic.scala:118:18]
wire [7:0] lo_25; // @[Arithmetic.scala:118:14]
wire sign_13 = _mvin_scale_pixel_repeater_io_resp_bits_out_13[7]; // @[Scratchpad.scala:387:43]
wire sign_29 = _mvin_scale_pixel_repeater_io_resp_bits_out_13[7]; // @[Scratchpad.scala:387:43]
wire [1:0] _GEN_47 = {2{sign_13}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] lo_lo_lo_hi_13; // @[Arithmetic.scala:118:18]
assign lo_lo_lo_hi_13 = _GEN_47; // @[Arithmetic.scala:118:18]
wire [1:0] lo_lo_hi_hi_13; // @[Arithmetic.scala:118:18]
assign lo_lo_hi_hi_13 = _GEN_47; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_lo_hi_13; // @[Arithmetic.scala:118:18]
assign lo_hi_lo_hi_13 = _GEN_47; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_hi_hi_13; // @[Arithmetic.scala:118:18]
assign lo_hi_hi_hi_13 = _GEN_47; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_lo_hi_13; // @[Arithmetic.scala:118:18]
assign hi_lo_lo_hi_13 = _GEN_47; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_hi_hi_13; // @[Arithmetic.scala:118:18]
assign hi_lo_hi_hi_13 = _GEN_47; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_lo_hi_13; // @[Arithmetic.scala:118:18]
assign hi_hi_lo_hi_13 = _GEN_47; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_hi_hi_13; // @[Arithmetic.scala:118:18]
assign hi_hi_hi_hi_13 = _GEN_47; // @[Arithmetic.scala:118:18]
wire [2:0] lo_lo_lo_13 = {lo_lo_lo_hi_13, sign_13}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_lo_hi_13 = {lo_lo_hi_hi_13, sign_13}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_lo_13 = {lo_lo_hi_13, lo_lo_lo_13}; // @[Arithmetic.scala:118:18]
wire [2:0] lo_hi_lo_13 = {lo_hi_lo_hi_13, sign_13}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_hi_hi_13 = {lo_hi_hi_hi_13, sign_13}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_hi_13 = {lo_hi_hi_13, lo_hi_lo_13}; // @[Arithmetic.scala:118:18]
wire [11:0] lo_26 = {lo_hi_13, lo_lo_13}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_lo_lo_13 = {hi_lo_lo_hi_13, sign_13}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_lo_hi_13 = {hi_lo_hi_hi_13, sign_13}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_lo_13 = {hi_lo_hi_13, hi_lo_lo_13}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_hi_lo_13 = {hi_hi_lo_hi_13, sign_13}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_hi_hi_13 = {hi_hi_hi_hi_13, sign_13}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_hi_13 = {hi_hi_hi_13, hi_hi_lo_13}; // @[Arithmetic.scala:118:18]
wire [11:0] hi_13 = {hi_hi_13, hi_lo_13}; // @[Arithmetic.scala:118:18]
wire [7:0] lo_27; // @[Arithmetic.scala:118:14]
wire sign_14 = _mvin_scale_pixel_repeater_io_resp_bits_out_14[7]; // @[Scratchpad.scala:387:43]
wire sign_30 = _mvin_scale_pixel_repeater_io_resp_bits_out_14[7]; // @[Scratchpad.scala:387:43]
wire [1:0] _GEN_48 = {2{sign_14}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] lo_lo_lo_hi_14; // @[Arithmetic.scala:118:18]
assign lo_lo_lo_hi_14 = _GEN_48; // @[Arithmetic.scala:118:18]
wire [1:0] lo_lo_hi_hi_14; // @[Arithmetic.scala:118:18]
assign lo_lo_hi_hi_14 = _GEN_48; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_lo_hi_14; // @[Arithmetic.scala:118:18]
assign lo_hi_lo_hi_14 = _GEN_48; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_hi_hi_14; // @[Arithmetic.scala:118:18]
assign lo_hi_hi_hi_14 = _GEN_48; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_lo_hi_14; // @[Arithmetic.scala:118:18]
assign hi_lo_lo_hi_14 = _GEN_48; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_hi_hi_14; // @[Arithmetic.scala:118:18]
assign hi_lo_hi_hi_14 = _GEN_48; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_lo_hi_14; // @[Arithmetic.scala:118:18]
assign hi_hi_lo_hi_14 = _GEN_48; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_hi_hi_14; // @[Arithmetic.scala:118:18]
assign hi_hi_hi_hi_14 = _GEN_48; // @[Arithmetic.scala:118:18]
wire [2:0] lo_lo_lo_14 = {lo_lo_lo_hi_14, sign_14}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_lo_hi_14 = {lo_lo_hi_hi_14, sign_14}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_lo_14 = {lo_lo_hi_14, lo_lo_lo_14}; // @[Arithmetic.scala:118:18]
wire [2:0] lo_hi_lo_14 = {lo_hi_lo_hi_14, sign_14}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_hi_hi_14 = {lo_hi_hi_hi_14, sign_14}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_hi_14 = {lo_hi_hi_14, lo_hi_lo_14}; // @[Arithmetic.scala:118:18]
wire [11:0] lo_28 = {lo_hi_14, lo_lo_14}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_lo_lo_14 = {hi_lo_lo_hi_14, sign_14}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_lo_hi_14 = {hi_lo_hi_hi_14, sign_14}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_lo_14 = {hi_lo_hi_14, hi_lo_lo_14}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_hi_lo_14 = {hi_hi_lo_hi_14, sign_14}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_hi_hi_14 = {hi_hi_hi_hi_14, sign_14}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_hi_14 = {hi_hi_hi_14, hi_hi_lo_14}; // @[Arithmetic.scala:118:18]
wire [11:0] hi_14 = {hi_hi_14, hi_lo_14}; // @[Arithmetic.scala:118:18]
wire [7:0] lo_29; // @[Arithmetic.scala:118:14]
wire sign_15 = _mvin_scale_pixel_repeater_io_resp_bits_out_15[7]; // @[Scratchpad.scala:387:43]
wire sign_31 = _mvin_scale_pixel_repeater_io_resp_bits_out_15[7]; // @[Scratchpad.scala:387:43]
wire [1:0] _GEN_49 = {2{sign_15}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] lo_lo_lo_hi_15; // @[Arithmetic.scala:118:18]
assign lo_lo_lo_hi_15 = _GEN_49; // @[Arithmetic.scala:118:18]
wire [1:0] lo_lo_hi_hi_15; // @[Arithmetic.scala:118:18]
assign lo_lo_hi_hi_15 = _GEN_49; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_lo_hi_15; // @[Arithmetic.scala:118:18]
assign lo_hi_lo_hi_15 = _GEN_49; // @[Arithmetic.scala:118:18]
wire [1:0] lo_hi_hi_hi_15; // @[Arithmetic.scala:118:18]
assign lo_hi_hi_hi_15 = _GEN_49; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_lo_hi_15; // @[Arithmetic.scala:118:18]
assign hi_lo_lo_hi_15 = _GEN_49; // @[Arithmetic.scala:118:18]
wire [1:0] hi_lo_hi_hi_15; // @[Arithmetic.scala:118:18]
assign hi_lo_hi_hi_15 = _GEN_49; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_lo_hi_15; // @[Arithmetic.scala:118:18]
assign hi_hi_lo_hi_15 = _GEN_49; // @[Arithmetic.scala:118:18]
wire [1:0] hi_hi_hi_hi_15; // @[Arithmetic.scala:118:18]
assign hi_hi_hi_hi_15 = _GEN_49; // @[Arithmetic.scala:118:18]
wire [2:0] lo_lo_lo_15 = {lo_lo_lo_hi_15, sign_15}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_lo_hi_15 = {lo_lo_hi_hi_15, sign_15}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_lo_15 = {lo_lo_hi_15, lo_lo_lo_15}; // @[Arithmetic.scala:118:18]
wire [2:0] lo_hi_lo_15 = {lo_hi_lo_hi_15, sign_15}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] lo_hi_hi_15 = {lo_hi_hi_hi_15, sign_15}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] lo_hi_15 = {lo_hi_hi_15, lo_hi_lo_15}; // @[Arithmetic.scala:118:18]
wire [11:0] lo_30 = {lo_hi_15, lo_lo_15}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_lo_lo_15 = {hi_lo_lo_hi_15, sign_15}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_lo_hi_15 = {hi_lo_hi_hi_15, sign_15}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_lo_15 = {hi_lo_hi_15, hi_lo_lo_15}; // @[Arithmetic.scala:118:18]
wire [2:0] hi_hi_lo_15 = {hi_hi_lo_hi_15, sign_15}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] hi_hi_hi_15 = {hi_hi_hi_hi_15, sign_15}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] hi_hi_15 = {hi_hi_hi_15, hi_hi_lo_15}; // @[Arithmetic.scala:118:18]
wire [11:0] hi_15 = {hi_hi_15, hi_lo_15}; // @[Arithmetic.scala:118:18]
wire [7:0] lo_31; // @[Arithmetic.scala:118:14]
wire [63:0] lo_lo_lo_16 = {hi_1, lo_2, lo_3, hi, lo, lo_1}; // @[Scratchpad.scala:790:106]
wire [63:0] lo_lo_hi_16 = {hi_3, lo_6, lo_7, hi_2, lo_4, lo_5}; // @[Scratchpad.scala:790:106]
wire [127:0] lo_lo_16 = {lo_lo_hi_16, lo_lo_lo_16}; // @[Scratchpad.scala:790:106]
wire [63:0] lo_hi_lo_16 = {hi_5, lo_10, lo_11, hi_4, lo_8, lo_9}; // @[Scratchpad.scala:790:106]
wire [63:0] lo_hi_hi_16 = {hi_7, lo_14, lo_15, hi_6, lo_12, lo_13}; // @[Scratchpad.scala:790:106]
wire [127:0] lo_hi_16 = {lo_hi_hi_16, lo_hi_lo_16}; // @[Scratchpad.scala:790:106]
wire [255:0] lo_32 = {lo_hi_16, lo_lo_16}; // @[Scratchpad.scala:790:106]
wire [63:0] hi_lo_lo_16 = {hi_9, lo_18, lo_19, hi_8, lo_16, lo_17}; // @[Scratchpad.scala:790:106]
wire [63:0] hi_lo_hi_16 = {hi_11, lo_22, lo_23, hi_10, lo_20, lo_21}; // @[Scratchpad.scala:790:106]
wire [127:0] hi_lo_16 = {hi_lo_hi_16, hi_lo_lo_16}; // @[Scratchpad.scala:790:106]
wire [63:0] hi_hi_lo_16 = {hi_13, lo_26, lo_27, hi_12, lo_24, lo_25}; // @[Scratchpad.scala:790:106]
wire [63:0] hi_hi_hi_16 = {hi_15, lo_30, lo_31, hi_14, lo_28, lo_29}; // @[Scratchpad.scala:790:106]
wire [127:0] hi_hi_16 = {hi_hi_hi_16, hi_hi_lo_16}; // @[Scratchpad.scala:790:106]
wire [255:0] hi_16 = {hi_hi_16, hi_lo_16}; // @[Scratchpad.scala:790:106]
wire [63:0] _GEN_50 = {_vsm_1_io_resp_bits_out_1, _vsm_1_io_resp_bits_out_0}; // @[VectorScalarMultiplier.scala:200:21]
wire [63:0] lo_lo_lo_17; // @[Scratchpad.scala:791:49]
assign lo_lo_lo_17 = _GEN_50; // @[Scratchpad.scala:791:49]
wire [63:0] lo_lo_lo_35; // @[Scratchpad.scala:791:49]
assign lo_lo_lo_35 = _GEN_50; // @[Scratchpad.scala:791:49]
wire [63:0] _GEN_51 = {_vsm_1_io_resp_bits_out_3, _vsm_1_io_resp_bits_out_2}; // @[VectorScalarMultiplier.scala:200:21]
wire [63:0] lo_lo_hi_17; // @[Scratchpad.scala:791:49]
assign lo_lo_hi_17 = _GEN_51; // @[Scratchpad.scala:791:49]
wire [63:0] lo_lo_hi_35; // @[Scratchpad.scala:791:49]
assign lo_lo_hi_35 = _GEN_51; // @[Scratchpad.scala:791:49]
wire [127:0] lo_lo_17 = {lo_lo_hi_17, lo_lo_lo_17}; // @[Scratchpad.scala:791:49]
wire [63:0] _GEN_52 = {_vsm_1_io_resp_bits_out_5, _vsm_1_io_resp_bits_out_4}; // @[VectorScalarMultiplier.scala:200:21]
wire [63:0] lo_hi_lo_17; // @[Scratchpad.scala:791:49]
assign lo_hi_lo_17 = _GEN_52; // @[Scratchpad.scala:791:49]
wire [63:0] lo_hi_lo_35; // @[Scratchpad.scala:791:49]
assign lo_hi_lo_35 = _GEN_52; // @[Scratchpad.scala:791:49]
wire [63:0] _GEN_53 = {_vsm_1_io_resp_bits_out_7, _vsm_1_io_resp_bits_out_6}; // @[VectorScalarMultiplier.scala:200:21]
wire [63:0] lo_hi_hi_17; // @[Scratchpad.scala:791:49]
assign lo_hi_hi_17 = _GEN_53; // @[Scratchpad.scala:791:49]
wire [63:0] lo_hi_hi_35; // @[Scratchpad.scala:791:49]
assign lo_hi_hi_35 = _GEN_53; // @[Scratchpad.scala:791:49]
wire [127:0] lo_hi_17 = {lo_hi_hi_17, lo_hi_lo_17}; // @[Scratchpad.scala:791:49]
wire [255:0] lo_33 = {lo_hi_17, lo_lo_17}; // @[Scratchpad.scala:791:49]
wire [63:0] _GEN_54 = {_vsm_1_io_resp_bits_out_9, _vsm_1_io_resp_bits_out_8}; // @[VectorScalarMultiplier.scala:200:21]
wire [63:0] hi_lo_lo_17; // @[Scratchpad.scala:791:49]
assign hi_lo_lo_17 = _GEN_54; // @[Scratchpad.scala:791:49]
wire [63:0] hi_lo_lo_35; // @[Scratchpad.scala:791:49]
assign hi_lo_lo_35 = _GEN_54; // @[Scratchpad.scala:791:49]
wire [63:0] _GEN_55 = {_vsm_1_io_resp_bits_out_11, _vsm_1_io_resp_bits_out_10}; // @[VectorScalarMultiplier.scala:200:21]
wire [63:0] hi_lo_hi_17; // @[Scratchpad.scala:791:49]
assign hi_lo_hi_17 = _GEN_55; // @[Scratchpad.scala:791:49]
wire [63:0] hi_lo_hi_35; // @[Scratchpad.scala:791:49]
assign hi_lo_hi_35 = _GEN_55; // @[Scratchpad.scala:791:49]
wire [127:0] hi_lo_17 = {hi_lo_hi_17, hi_lo_lo_17}; // @[Scratchpad.scala:791:49]
wire [63:0] _GEN_56 = {_vsm_1_io_resp_bits_out_13, _vsm_1_io_resp_bits_out_12}; // @[VectorScalarMultiplier.scala:200:21]
wire [63:0] hi_hi_lo_17; // @[Scratchpad.scala:791:49]
assign hi_hi_lo_17 = _GEN_56; // @[Scratchpad.scala:791:49]
wire [63:0] hi_hi_lo_35; // @[Scratchpad.scala:791:49]
assign hi_hi_lo_35 = _GEN_56; // @[Scratchpad.scala:791:49]
wire [63:0] _GEN_57 = {_vsm_1_io_resp_bits_out_15, _vsm_1_io_resp_bits_out_14}; // @[VectorScalarMultiplier.scala:200:21]
wire [63:0] hi_hi_hi_17; // @[Scratchpad.scala:791:49]
assign hi_hi_hi_17 = _GEN_57; // @[Scratchpad.scala:791:49]
wire [63:0] hi_hi_hi_35; // @[Scratchpad.scala:791:49]
assign hi_hi_hi_35 = _GEN_57; // @[Scratchpad.scala:791:49]
wire [127:0] hi_hi_17 = {hi_hi_hi_17, hi_hi_lo_17}; // @[Scratchpad.scala:791:49]
wire [255:0] hi_17 = {hi_hi_17, hi_lo_17}; // @[Scratchpad.scala:791:49]
wire expanded_1_0; // @[Scratchpad.scala:798:39]
wire expanded_1_1; // @[Scratchpad.scala:798:39]
wire expanded_1_2; // @[Scratchpad.scala:798:39]
wire expanded_1_3; // @[Scratchpad.scala:798:39]
wire expanded_1_4; // @[Scratchpad.scala:798:39]
wire expanded_1_5; // @[Scratchpad.scala:798:39]
wire expanded_1_6; // @[Scratchpad.scala:798:39]
wire expanded_1_7; // @[Scratchpad.scala:798:39]
wire expanded_1_8; // @[Scratchpad.scala:798:39]
wire expanded_1_9; // @[Scratchpad.scala:798:39]
wire expanded_1_10; // @[Scratchpad.scala:798:39]
wire expanded_1_11; // @[Scratchpad.scala:798:39]
wire expanded_1_12; // @[Scratchpad.scala:798:39]
wire expanded_1_13; // @[Scratchpad.scala:798:39]
wire expanded_1_14; // @[Scratchpad.scala:798:39]
wire expanded_1_15; // @[Scratchpad.scala:798:39]
wire expanded_1_16; // @[Scratchpad.scala:798:39]
wire expanded_1_17; // @[Scratchpad.scala:798:39]
wire expanded_1_18; // @[Scratchpad.scala:798:39]
wire expanded_1_19; // @[Scratchpad.scala:798:39]
wire expanded_1_20; // @[Scratchpad.scala:798:39]
wire expanded_1_21; // @[Scratchpad.scala:798:39]
wire expanded_1_22; // @[Scratchpad.scala:798:39]
wire expanded_1_23; // @[Scratchpad.scala:798:39]
wire expanded_1_24; // @[Scratchpad.scala:798:39]
wire expanded_1_25; // @[Scratchpad.scala:798:39]
wire expanded_1_26; // @[Scratchpad.scala:798:39]
wire expanded_1_27; // @[Scratchpad.scala:798:39]
wire expanded_1_28; // @[Scratchpad.scala:798:39]
wire expanded_1_29; // @[Scratchpad.scala:798:39]
wire expanded_1_30; // @[Scratchpad.scala:798:39]
wire expanded_1_31; // @[Scratchpad.scala:798:39]
wire expanded_1_32; // @[Scratchpad.scala:798:39]
wire expanded_1_33; // @[Scratchpad.scala:798:39]
wire expanded_1_34; // @[Scratchpad.scala:798:39]
wire expanded_1_35; // @[Scratchpad.scala:798:39]
wire expanded_1_36; // @[Scratchpad.scala:798:39]
wire expanded_1_37; // @[Scratchpad.scala:798:39]
wire expanded_1_38; // @[Scratchpad.scala:798:39]
wire expanded_1_39; // @[Scratchpad.scala:798:39]
wire expanded_1_40; // @[Scratchpad.scala:798:39]
wire expanded_1_41; // @[Scratchpad.scala:798:39]
wire expanded_1_42; // @[Scratchpad.scala:798:39]
wire expanded_1_43; // @[Scratchpad.scala:798:39]
wire expanded_1_44; // @[Scratchpad.scala:798:39]
wire expanded_1_45; // @[Scratchpad.scala:798:39]
wire expanded_1_46; // @[Scratchpad.scala:798:39]
wire expanded_1_47; // @[Scratchpad.scala:798:39]
wire expanded_1_48; // @[Scratchpad.scala:798:39]
wire expanded_1_49; // @[Scratchpad.scala:798:39]
wire expanded_1_50; // @[Scratchpad.scala:798:39]
wire expanded_1_51; // @[Scratchpad.scala:798:39]
wire expanded_1_52; // @[Scratchpad.scala:798:39]
wire expanded_1_53; // @[Scratchpad.scala:798:39]
wire expanded_1_54; // @[Scratchpad.scala:798:39]
wire expanded_1_55; // @[Scratchpad.scala:798:39]
wire expanded_1_56; // @[Scratchpad.scala:798:39]
wire expanded_1_57; // @[Scratchpad.scala:798:39]
wire expanded_1_58; // @[Scratchpad.scala:798:39]
wire expanded_1_59; // @[Scratchpad.scala:798:39]
wire expanded_1_60; // @[Scratchpad.scala:798:39]
wire expanded_1_61; // @[Scratchpad.scala:798:39]
wire expanded_1_62; // @[Scratchpad.scala:798:39]
wire expanded_1_63; // @[Scratchpad.scala:798:39]
wire _T_301 = zerowrite_4 & ~spad_last; // @[Scratchpad.scala:740:112, :751:76, :786:32, :808:31]
wire _GEN_58 = io_acc_write_0_valid_0 | _T_149; // @[Scratchpad.scala:205:9, :782:24, :783:27, :786:{29,72}, :787:27, :808:74]
assign bank_ios_1_0_write_valid = _GEN_58 | _T_301; // @[Scratchpad.scala:637:29, :782:24, :783:27, :786:72, :787:27, :808:{31,74}]
wire expanded_2_0; // @[Scratchpad.scala:814:35]
wire expanded_2_1; // @[Scratchpad.scala:814:35]
wire expanded_2_2; // @[Scratchpad.scala:814:35]
wire expanded_2_3; // @[Scratchpad.scala:814:35]
wire expanded_2_4; // @[Scratchpad.scala:814:35]
wire expanded_2_5; // @[Scratchpad.scala:814:35]
wire expanded_2_6; // @[Scratchpad.scala:814:35]
wire expanded_2_7; // @[Scratchpad.scala:814:35]
wire expanded_2_8; // @[Scratchpad.scala:814:35]
wire expanded_2_9; // @[Scratchpad.scala:814:35]
wire expanded_2_10; // @[Scratchpad.scala:814:35]
wire expanded_2_11; // @[Scratchpad.scala:814:35]
wire expanded_2_12; // @[Scratchpad.scala:814:35]
wire expanded_2_13; // @[Scratchpad.scala:814:35]
wire expanded_2_14; // @[Scratchpad.scala:814:35]
wire expanded_2_15; // @[Scratchpad.scala:814:35]
wire expanded_2_16; // @[Scratchpad.scala:814:35]
wire expanded_2_17; // @[Scratchpad.scala:814:35]
wire expanded_2_18; // @[Scratchpad.scala:814:35]
wire expanded_2_19; // @[Scratchpad.scala:814:35]
wire expanded_2_20; // @[Scratchpad.scala:814:35]
wire expanded_2_21; // @[Scratchpad.scala:814:35]
wire expanded_2_22; // @[Scratchpad.scala:814:35]
wire expanded_2_23; // @[Scratchpad.scala:814:35]
wire expanded_2_24; // @[Scratchpad.scala:814:35]
wire expanded_2_25; // @[Scratchpad.scala:814:35]
wire expanded_2_26; // @[Scratchpad.scala:814:35]
wire expanded_2_27; // @[Scratchpad.scala:814:35]
wire expanded_2_28; // @[Scratchpad.scala:814:35]
wire expanded_2_29; // @[Scratchpad.scala:814:35]
wire expanded_2_30; // @[Scratchpad.scala:814:35]
wire expanded_2_31; // @[Scratchpad.scala:814:35]
wire expanded_2_32; // @[Scratchpad.scala:814:35]
wire expanded_2_33; // @[Scratchpad.scala:814:35]
wire expanded_2_34; // @[Scratchpad.scala:814:35]
wire expanded_2_35; // @[Scratchpad.scala:814:35]
wire expanded_2_36; // @[Scratchpad.scala:814:35]
wire expanded_2_37; // @[Scratchpad.scala:814:35]
wire expanded_2_38; // @[Scratchpad.scala:814:35]
wire expanded_2_39; // @[Scratchpad.scala:814:35]
wire expanded_2_40; // @[Scratchpad.scala:814:35]
wire expanded_2_41; // @[Scratchpad.scala:814:35]
wire expanded_2_42; // @[Scratchpad.scala:814:35]
wire expanded_2_43; // @[Scratchpad.scala:814:35]
wire expanded_2_44; // @[Scratchpad.scala:814:35]
wire expanded_2_45; // @[Scratchpad.scala:814:35]
wire expanded_2_46; // @[Scratchpad.scala:814:35]
wire expanded_2_47; // @[Scratchpad.scala:814:35]
wire expanded_2_48; // @[Scratchpad.scala:814:35]
wire expanded_2_49; // @[Scratchpad.scala:814:35]
wire expanded_2_50; // @[Scratchpad.scala:814:35]
wire expanded_2_51; // @[Scratchpad.scala:814:35]
wire expanded_2_52; // @[Scratchpad.scala:814:35]
wire expanded_2_53; // @[Scratchpad.scala:814:35]
wire expanded_2_54; // @[Scratchpad.scala:814:35]
wire expanded_2_55; // @[Scratchpad.scala:814:35]
wire expanded_2_56; // @[Scratchpad.scala:814:35]
wire expanded_2_57; // @[Scratchpad.scala:814:35]
wire expanded_2_58; // @[Scratchpad.scala:814:35]
wire expanded_2_59; // @[Scratchpad.scala:814:35]
wire expanded_2_60; // @[Scratchpad.scala:814:35]
wire expanded_2_61; // @[Scratchpad.scala:814:35]
wire expanded_2_62; // @[Scratchpad.scala:814:35]
wire expanded_2_63; // @[Scratchpad.scala:814:35]
assign bank_ios_1_0_write_bits_data_0_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_data_0_0_0 : _T_149 ? (from_mvin_scale ? lo_32[31:0] : lo_33[31:0]) : 32'h0; // @[Scratchpad.scala:205:9, :637:29, :727:71, :782:24, :784:31, :786:{29,72}, :788:{31,37}, :790:106, :791:49, :808:74]
assign bank_ios_1_0_write_bits_data_1_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_data_1_0_0 : _T_149 ? (from_mvin_scale ? lo_32[63:32] : lo_33[63:32]) : 32'h0; // @[Scratchpad.scala:205:9, :637:29, :727:71, :782:24, :784:31, :786:{29,72}, :788:{31,37}, :790:106, :791:49, :808:74]
assign bank_ios_1_0_write_bits_data_2_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_data_2_0_0 : _T_149 ? (from_mvin_scale ? lo_32[95:64] : lo_33[95:64]) : 32'h0; // @[Scratchpad.scala:205:9, :637:29, :727:71, :782:24, :784:31, :786:{29,72}, :788:{31,37}, :790:106, :791:49, :808:74]
assign bank_ios_1_0_write_bits_data_3_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_data_3_0_0 : _T_149 ? (from_mvin_scale ? lo_32[127:96] : lo_33[127:96]) : 32'h0; // @[Scratchpad.scala:205:9, :637:29, :727:71, :782:24, :784:31, :786:{29,72}, :788:{31,37}, :790:106, :791:49, :808:74]
assign bank_ios_1_0_write_bits_data_4_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_data_4_0_0 : _T_149 ? (from_mvin_scale ? lo_32[159:128] : lo_33[159:128]) : 32'h0; // @[Scratchpad.scala:205:9, :637:29, :727:71, :782:24, :784:31, :786:{29,72}, :788:{31,37}, :790:106, :791:49, :808:74]
assign bank_ios_1_0_write_bits_data_5_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_data_5_0_0 : _T_149 ? (from_mvin_scale ? lo_32[191:160] : lo_33[191:160]) : 32'h0; // @[Scratchpad.scala:205:9, :637:29, :727:71, :782:24, :784:31, :786:{29,72}, :788:{31,37}, :790:106, :791:49, :808:74]
assign bank_ios_1_0_write_bits_data_6_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_data_6_0_0 : _T_149 ? (from_mvin_scale ? lo_32[223:192] : lo_33[223:192]) : 32'h0; // @[Scratchpad.scala:205:9, :637:29, :727:71, :782:24, :784:31, :786:{29,72}, :788:{31,37}, :790:106, :791:49, :808:74]
assign bank_ios_1_0_write_bits_data_7_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_data_7_0_0 : _T_149 ? (from_mvin_scale ? lo_32[255:224] : lo_33[255:224]) : 32'h0; // @[Scratchpad.scala:205:9, :637:29, :727:71, :782:24, :784:31, :786:{29,72}, :788:{31,37}, :790:106, :791:49, :808:74]
assign bank_ios_1_0_write_bits_data_8_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_data_8_0_0 : _T_149 ? (from_mvin_scale ? hi_16[31:0] : hi_17[31:0]) : 32'h0; // @[Scratchpad.scala:205:9, :637:29, :727:71, :782:24, :784:31, :786:{29,72}, :788:{31,37}, :790:106, :791:49, :808:74]
assign bank_ios_1_0_write_bits_data_9_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_data_9_0_0 : _T_149 ? (from_mvin_scale ? hi_16[63:32] : hi_17[63:32]) : 32'h0; // @[Scratchpad.scala:205:9, :637:29, :727:71, :782:24, :784:31, :786:{29,72}, :788:{31,37}, :790:106, :791:49, :808:74]
assign bank_ios_1_0_write_bits_data_10_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_data_10_0_0 : _T_149 ? (from_mvin_scale ? hi_16[95:64] : hi_17[95:64]) : 32'h0; // @[Scratchpad.scala:205:9, :637:29, :727:71, :782:24, :784:31, :786:{29,72}, :788:{31,37}, :790:106, :791:49, :808:74]
assign bank_ios_1_0_write_bits_data_11_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_data_11_0_0 : _T_149 ? (from_mvin_scale ? hi_16[127:96] : hi_17[127:96]) : 32'h0; // @[Scratchpad.scala:205:9, :637:29, :727:71, :782:24, :784:31, :786:{29,72}, :788:{31,37}, :790:106, :791:49, :808:74]
assign bank_ios_1_0_write_bits_data_12_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_data_12_0_0 : _T_149 ? (from_mvin_scale ? hi_16[159:128] : hi_17[159:128]) : 32'h0; // @[Scratchpad.scala:205:9, :637:29, :727:71, :782:24, :784:31, :786:{29,72}, :788:{31,37}, :790:106, :791:49, :808:74]
assign bank_ios_1_0_write_bits_data_13_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_data_13_0_0 : _T_149 ? (from_mvin_scale ? hi_16[191:160] : hi_17[191:160]) : 32'h0; // @[Scratchpad.scala:205:9, :637:29, :727:71, :782:24, :784:31, :786:{29,72}, :788:{31,37}, :790:106, :791:49, :808:74]
assign bank_ios_1_0_write_bits_data_14_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_data_14_0_0 : _T_149 ? (from_mvin_scale ? hi_16[223:192] : hi_17[223:192]) : 32'h0; // @[Scratchpad.scala:205:9, :637:29, :727:71, :782:24, :784:31, :786:{29,72}, :788:{31,37}, :790:106, :791:49, :808:74]
assign bank_ios_1_0_write_bits_data_15_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_data_15_0_0 : _T_149 ? (from_mvin_scale ? hi_16[255:224] : hi_17[255:224]) : 32'h0; // @[Scratchpad.scala:205:9, :637:29, :727:71, :782:24, :784:31, :786:{29,72}, :788:{31,37}, :790:106, :791:49, :808:74]
assign bank_ios_1_0_write_bits_mask_0 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_0_0 : _T_149 ? (from_mvin_scale ? expanded_1_0 : _vsm_1_io_resp_bits_tag_mask_0) : expanded_2_0; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_1 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_1_0 : _T_149 ? (from_mvin_scale ? expanded_1_1 : _vsm_1_io_resp_bits_tag_mask_1) : expanded_2_1; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_2 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_2_0 : _T_149 ? (from_mvin_scale ? expanded_1_2 : _vsm_1_io_resp_bits_tag_mask_2) : expanded_2_2; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_3 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_3_0 : _T_149 ? (from_mvin_scale ? expanded_1_3 : _vsm_1_io_resp_bits_tag_mask_3) : expanded_2_3; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_4 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_4_0 : _T_149 ? (from_mvin_scale ? expanded_1_4 : _vsm_1_io_resp_bits_tag_mask_4) : expanded_2_4; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_5 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_5_0 : _T_149 ? (from_mvin_scale ? expanded_1_5 : _vsm_1_io_resp_bits_tag_mask_5) : expanded_2_5; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_6 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_6_0 : _T_149 ? (from_mvin_scale ? expanded_1_6 : _vsm_1_io_resp_bits_tag_mask_6) : expanded_2_6; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_7 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_7_0 : _T_149 ? (from_mvin_scale ? expanded_1_7 : _vsm_1_io_resp_bits_tag_mask_7) : expanded_2_7; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_8 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_8_0 : _T_149 ? (from_mvin_scale ? expanded_1_8 : _vsm_1_io_resp_bits_tag_mask_8) : expanded_2_8; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_9 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_9_0 : _T_149 ? (from_mvin_scale ? expanded_1_9 : _vsm_1_io_resp_bits_tag_mask_9) : expanded_2_9; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_10 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_10_0 : _T_149 ? (from_mvin_scale ? expanded_1_10 : _vsm_1_io_resp_bits_tag_mask_10) : expanded_2_10; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_11 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_11_0 : _T_149 ? (from_mvin_scale ? expanded_1_11 : _vsm_1_io_resp_bits_tag_mask_11) : expanded_2_11; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_12 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_12_0 : _T_149 ? (from_mvin_scale ? expanded_1_12 : _vsm_1_io_resp_bits_tag_mask_12) : expanded_2_12; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_13 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_13_0 : _T_149 ? (from_mvin_scale ? expanded_1_13 : _vsm_1_io_resp_bits_tag_mask_13) : expanded_2_13; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_14 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_14_0 : _T_149 ? (from_mvin_scale ? expanded_1_14 : _vsm_1_io_resp_bits_tag_mask_14) : expanded_2_14; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_15 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_15_0 : _T_149 ? (from_mvin_scale ? expanded_1_15 : _vsm_1_io_resp_bits_tag_mask_15) : expanded_2_15; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_16 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_16_0 : _T_149 ? (from_mvin_scale ? expanded_1_16 : _vsm_1_io_resp_bits_tag_mask_16) : expanded_2_16; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_17 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_17_0 : _T_149 ? (from_mvin_scale ? expanded_1_17 : _vsm_1_io_resp_bits_tag_mask_17) : expanded_2_17; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_18 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_18_0 : _T_149 ? (from_mvin_scale ? expanded_1_18 : _vsm_1_io_resp_bits_tag_mask_18) : expanded_2_18; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_19 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_19_0 : _T_149 ? (from_mvin_scale ? expanded_1_19 : _vsm_1_io_resp_bits_tag_mask_19) : expanded_2_19; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_20 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_20_0 : _T_149 ? (from_mvin_scale ? expanded_1_20 : _vsm_1_io_resp_bits_tag_mask_20) : expanded_2_20; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_21 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_21_0 : _T_149 ? (from_mvin_scale ? expanded_1_21 : _vsm_1_io_resp_bits_tag_mask_21) : expanded_2_21; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_22 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_22_0 : _T_149 ? (from_mvin_scale ? expanded_1_22 : _vsm_1_io_resp_bits_tag_mask_22) : expanded_2_22; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_23 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_23_0 : _T_149 ? (from_mvin_scale ? expanded_1_23 : _vsm_1_io_resp_bits_tag_mask_23) : expanded_2_23; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_24 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_24_0 : _T_149 ? (from_mvin_scale ? expanded_1_24 : _vsm_1_io_resp_bits_tag_mask_24) : expanded_2_24; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_25 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_25_0 : _T_149 ? (from_mvin_scale ? expanded_1_25 : _vsm_1_io_resp_bits_tag_mask_25) : expanded_2_25; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_26 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_26_0 : _T_149 ? (from_mvin_scale ? expanded_1_26 : _vsm_1_io_resp_bits_tag_mask_26) : expanded_2_26; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_27 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_27_0 : _T_149 ? (from_mvin_scale ? expanded_1_27 : _vsm_1_io_resp_bits_tag_mask_27) : expanded_2_27; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_28 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_28_0 : _T_149 ? (from_mvin_scale ? expanded_1_28 : _vsm_1_io_resp_bits_tag_mask_28) : expanded_2_28; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_29 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_29_0 : _T_149 ? (from_mvin_scale ? expanded_1_29 : _vsm_1_io_resp_bits_tag_mask_29) : expanded_2_29; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_30 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_30_0 : _T_149 ? (from_mvin_scale ? expanded_1_30 : _vsm_1_io_resp_bits_tag_mask_30) : expanded_2_30; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_31 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_31_0 : _T_149 ? (from_mvin_scale ? expanded_1_31 : _vsm_1_io_resp_bits_tag_mask_31) : expanded_2_31; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_32 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_32_0 : _T_149 ? (from_mvin_scale ? expanded_1_32 : _vsm_1_io_resp_bits_tag_mask_32) : expanded_2_32; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_33 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_33_0 : _T_149 ? (from_mvin_scale ? expanded_1_33 : _vsm_1_io_resp_bits_tag_mask_33) : expanded_2_33; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_34 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_34_0 : _T_149 ? (from_mvin_scale ? expanded_1_34 : _vsm_1_io_resp_bits_tag_mask_34) : expanded_2_34; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_35 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_35_0 : _T_149 ? (from_mvin_scale ? expanded_1_35 : _vsm_1_io_resp_bits_tag_mask_35) : expanded_2_35; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_36 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_36_0 : _T_149 ? (from_mvin_scale ? expanded_1_36 : _vsm_1_io_resp_bits_tag_mask_36) : expanded_2_36; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_37 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_37_0 : _T_149 ? (from_mvin_scale ? expanded_1_37 : _vsm_1_io_resp_bits_tag_mask_37) : expanded_2_37; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_38 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_38_0 : _T_149 ? (from_mvin_scale ? expanded_1_38 : _vsm_1_io_resp_bits_tag_mask_38) : expanded_2_38; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_39 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_39_0 : _T_149 ? (from_mvin_scale ? expanded_1_39 : _vsm_1_io_resp_bits_tag_mask_39) : expanded_2_39; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_40 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_40_0 : _T_149 ? (from_mvin_scale ? expanded_1_40 : _vsm_1_io_resp_bits_tag_mask_40) : expanded_2_40; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_41 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_41_0 : _T_149 ? (from_mvin_scale ? expanded_1_41 : _vsm_1_io_resp_bits_tag_mask_41) : expanded_2_41; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_42 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_42_0 : _T_149 ? (from_mvin_scale ? expanded_1_42 : _vsm_1_io_resp_bits_tag_mask_42) : expanded_2_42; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_43 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_43_0 : _T_149 ? (from_mvin_scale ? expanded_1_43 : _vsm_1_io_resp_bits_tag_mask_43) : expanded_2_43; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_44 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_44_0 : _T_149 ? (from_mvin_scale ? expanded_1_44 : _vsm_1_io_resp_bits_tag_mask_44) : expanded_2_44; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_45 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_45_0 : _T_149 ? (from_mvin_scale ? expanded_1_45 : _vsm_1_io_resp_bits_tag_mask_45) : expanded_2_45; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_46 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_46_0 : _T_149 ? (from_mvin_scale ? expanded_1_46 : _vsm_1_io_resp_bits_tag_mask_46) : expanded_2_46; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_47 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_47_0 : _T_149 ? (from_mvin_scale ? expanded_1_47 : _vsm_1_io_resp_bits_tag_mask_47) : expanded_2_47; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_48 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_48_0 : _T_149 ? (from_mvin_scale ? expanded_1_48 : _vsm_1_io_resp_bits_tag_mask_48) : expanded_2_48; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_49 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_49_0 : _T_149 ? (from_mvin_scale ? expanded_1_49 : _vsm_1_io_resp_bits_tag_mask_49) : expanded_2_49; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_50 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_50_0 : _T_149 ? (from_mvin_scale ? expanded_1_50 : _vsm_1_io_resp_bits_tag_mask_50) : expanded_2_50; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_51 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_51_0 : _T_149 ? (from_mvin_scale ? expanded_1_51 : _vsm_1_io_resp_bits_tag_mask_51) : expanded_2_51; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_52 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_52_0 : _T_149 ? (from_mvin_scale ? expanded_1_52 : _vsm_1_io_resp_bits_tag_mask_52) : expanded_2_52; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_53 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_53_0 : _T_149 ? (from_mvin_scale ? expanded_1_53 : _vsm_1_io_resp_bits_tag_mask_53) : expanded_2_53; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_54 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_54_0 : _T_149 ? (from_mvin_scale ? expanded_1_54 : _vsm_1_io_resp_bits_tag_mask_54) : expanded_2_54; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_55 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_55_0 : _T_149 ? (from_mvin_scale ? expanded_1_55 : _vsm_1_io_resp_bits_tag_mask_55) : expanded_2_55; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_56 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_56_0 : _T_149 ? (from_mvin_scale ? expanded_1_56 : _vsm_1_io_resp_bits_tag_mask_56) : expanded_2_56; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_57 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_57_0 : _T_149 ? (from_mvin_scale ? expanded_1_57 : _vsm_1_io_resp_bits_tag_mask_57) : expanded_2_57; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_58 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_58_0 : _T_149 ? (from_mvin_scale ? expanded_1_58 : _vsm_1_io_resp_bits_tag_mask_58) : expanded_2_58; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_59 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_59_0 : _T_149 ? (from_mvin_scale ? expanded_1_59 : _vsm_1_io_resp_bits_tag_mask_59) : expanded_2_59; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_60 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_60_0 : _T_149 ? (from_mvin_scale ? expanded_1_60 : _vsm_1_io_resp_bits_tag_mask_60) : expanded_2_60; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_61 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_61_0 : _T_149 ? (from_mvin_scale ? expanded_1_61 : _vsm_1_io_resp_bits_tag_mask_61) : expanded_2_61; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_62 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_62_0 : _T_149 ? (from_mvin_scale ? expanded_1_62 : _vsm_1_io_resp_bits_tag_mask_62) : expanded_2_62; // @[VectorScalarMultiplier.scala:200:21]
assign bank_ios_1_0_write_bits_mask_63 = io_acc_write_0_valid_0 ? io_acc_write_0_bits_mask_63_0 : _T_149 ? (from_mvin_scale ? expanded_1_63 : _vsm_1_io_resp_bits_tag_mask_63) : expanded_2_63; // @[VectorScalarMultiplier.scala:200:21] |
Generate the Verilog code corresponding to the following Chisel files.
File MulRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (ported from Verilog to
Chisel by Andrew Waterman).
Copyright 2019, 2020 The Regents of the University of California. All rights
reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
/** Multiplies two raw-format floating-point operands, producing a raw result
  * whose significand holds the full, untruncated product
  * (`sigWidth*2 - 1` bits).  Rounding is left to a downstream module.
  *
  * Outputs: `invalidExc` flags an IEEE invalid operation; `rawOut` carries
  * the raw product (NaN payloads are not propagated — only the NaN flag).
  */
class MulFullRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
    val io = IO(new Bundle {
        val a = Input(new RawFloat(expWidth, sigWidth))
        val b = Input(new RawFloat(expWidth, sigWidth))
        val invalidExc = Output(Bool())
        val rawOut = Output(new RawFloat(expWidth, sigWidth*2 - 1))
    })

    /*------------------------------------------------------------------------
    *------------------------------------------------------------------------*/
    // Inf * 0 (in either order) is the one invalid combination that is not
    // already signaled by a signaling-NaN operand.
    val infTimesZero =
        (io.a.isInf && io.b.isZero) || (io.a.isZero && io.b.isInf)

    io.invalidExc :=
        isSigNaNRawFloat(io.a) || isSigNaNRawFloat(io.b) || infTimesZero

    /*------------------------------------------------------------------------
    *------------------------------------------------------------------------*/
    io.rawOut.isNaN  := io.a.isNaN  || io.b.isNaN
    io.rawOut.isInf  := io.a.isInf  || io.b.isInf
    io.rawOut.isZero := io.a.isZero || io.b.isZero
    io.rawOut.sign   := io.a.sign ^ io.b.sign
    // Both exponents carry a bias; subtract one bias (1 << expWidth) so the
    // sum is biased only once.
    io.rawOut.sExp := io.a.sExp + io.b.sExp - (1<<expWidth).S
    // Full significand product, kept to 2*sigWidth bits.
    io.rawOut.sig := (io.a.sig * io.b.sig)(sigWidth*2 - 1, 0)
}
/** Multiplies two raw-format floating-point operands and compresses the
  * full-width product significand down to `sigWidth + 2` bits (guard bit plus
  * a sticky bit), the form expected by the HardFloat rounding modules.
  *
  * NOTE(review): relies on Chisel last-connect semantics — the bulk connect
  * of `io.rawOut` below is deliberately followed by an override of its `sig`
  * field; do not reorder these assignments.
  */
class MulRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
    val io = IO(new Bundle {
        val a = Input(new RawFloat(expWidth, sigWidth))
        val b = Input(new RawFloat(expWidth, sigWidth))
        val invalidExc = Output(Bool())
        // Result significand is narrowed to sigWidth + 2 bits for rounding.
        val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
    })
    // Full-precision multiplier producing a (sigWidth*2 - 1)-bit significand.
    val mulFullRaw = Module(new MulFullRawFN(expWidth, sigWidth))
    mulFullRaw.io.a := io.a
    mulFullRaw.io.b := io.b
    io.invalidExc := mulFullRaw.io.invalidExc
    // Bulk-connect all fields of the raw result; `sig` is overridden just
    // below (last connect wins).
    io.rawOut := mulFullRaw.io.rawOut
    io.rawOut.sig := {
        val sig = mulFullRaw.io.rawOut.sig
        // Keep the top bits of the product; OR-reduce the dropped low
        // (sigWidth - 2) bits into a single sticky bit so rounding remains
        // correct.
        Cat(sig >> (sigWidth - 2), sig(sigWidth - 3, 0).orR)
    }
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
/** Recoded-format floating-point multiplier: decodes two recoded operands,
  * multiplies them in raw form, then rounds the product back to the recoded
  * format under the requested rounding mode.
  *
  * `out` is the recoded product; `exceptionFlags` are the five IEEE flags
  * (invalid, infinite/div-by-zero, overflow, underflow, inexact).
  */
class MulRecFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
    val io = IO(new Bundle {
        val a = Input(UInt((expWidth + sigWidth + 1).W))
        val b = Input(UInt((expWidth + sigWidth + 1).W))
        val roundingMode   = Input(UInt(3.W))
        val detectTininess = Input(Bool())
        val out = Output(UInt((expWidth + sigWidth + 1).W))
        val exceptionFlags = Output(UInt(5.W))
    })

    //------------------------------------------------------------------------
    // Decode the recoded inputs to raw form and multiply.
    //------------------------------------------------------------------------
    val mulRaw = Module(new MulRawFN(expWidth, sigWidth))
    mulRaw.io.a := rawFloatFromRecFN(expWidth, sigWidth, io.a)
    mulRaw.io.b := rawFloatFromRecFN(expWidth, sigWidth, io.b)

    //------------------------------------------------------------------------
    // Round the raw product back into the recoded encoding.
    //------------------------------------------------------------------------
    val rounder = Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
    rounder.io.invalidExc     := mulRaw.io.invalidExc
    rounder.io.infiniteExc    := false.B   // multiplication never divides by zero
    rounder.io.in             := mulRaw.io.rawOut
    rounder.io.roundingMode   := io.roundingMode
    rounder.io.detectTininess := io.detectTininess

    io.out            := rounder.io.out
    io.exceptionFlags := rounder.io.exceptionFlags
}
| module MulRawFN_41( // @[MulRecFN.scala:75:7]
input io_a_isNaN, // @[MulRecFN.scala:77:16]
input io_a_isInf, // @[MulRecFN.scala:77:16]
input io_a_isZero, // @[MulRecFN.scala:77:16]
input io_a_sign, // @[MulRecFN.scala:77:16]
input [9:0] io_a_sExp, // @[MulRecFN.scala:77:16]
input [24:0] io_a_sig, // @[MulRecFN.scala:77:16]
input io_b_isNaN, // @[MulRecFN.scala:77:16]
input io_b_isInf, // @[MulRecFN.scala:77:16]
input io_b_isZero, // @[MulRecFN.scala:77:16]
input io_b_sign, // @[MulRecFN.scala:77:16]
input [9:0] io_b_sExp, // @[MulRecFN.scala:77:16]
input [24:0] io_b_sig, // @[MulRecFN.scala:77:16]
output io_invalidExc, // @[MulRecFN.scala:77:16]
output io_rawOut_isNaN, // @[MulRecFN.scala:77:16]
output io_rawOut_isInf, // @[MulRecFN.scala:77:16]
output io_rawOut_isZero, // @[MulRecFN.scala:77:16]
output io_rawOut_sign, // @[MulRecFN.scala:77:16]
output [9:0] io_rawOut_sExp, // @[MulRecFN.scala:77:16]
output [26:0] io_rawOut_sig // @[MulRecFN.scala:77:16]
);
// Auto-generated from the Chisel MulRawFN above (single precision).
// Instantiates the full-width multiplier and compresses its 48-bit product
// to 27 bits: upper 26 bits plus a sticky OR of the low 22 bits.
wire [47:0] _mulFullRaw_io_rawOut_sig; // @[MulRecFN.scala:84:28]
wire io_a_isNaN_0 = io_a_isNaN; // @[MulRecFN.scala:75:7]
wire io_a_isInf_0 = io_a_isInf; // @[MulRecFN.scala:75:7]
wire io_a_isZero_0 = io_a_isZero; // @[MulRecFN.scala:75:7]
wire io_a_sign_0 = io_a_sign; // @[MulRecFN.scala:75:7]
wire [9:0] io_a_sExp_0 = io_a_sExp; // @[MulRecFN.scala:75:7]
wire [24:0] io_a_sig_0 = io_a_sig; // @[MulRecFN.scala:75:7]
wire io_b_isNaN_0 = io_b_isNaN; // @[MulRecFN.scala:75:7]
wire io_b_isInf_0 = io_b_isInf; // @[MulRecFN.scala:75:7]
wire io_b_isZero_0 = io_b_isZero; // @[MulRecFN.scala:75:7]
wire io_b_sign_0 = io_b_sign; // @[MulRecFN.scala:75:7]
wire [9:0] io_b_sExp_0 = io_b_sExp; // @[MulRecFN.scala:75:7]
wire [24:0] io_b_sig_0 = io_b_sig; // @[MulRecFN.scala:75:7]
wire [26:0] _io_rawOut_sig_T_3; // @[MulRecFN.scala:93:10]
wire io_rawOut_isNaN_0; // @[MulRecFN.scala:75:7]
wire io_rawOut_isInf_0; // @[MulRecFN.scala:75:7]
wire io_rawOut_isZero_0; // @[MulRecFN.scala:75:7]
wire io_rawOut_sign_0; // @[MulRecFN.scala:75:7]
wire [9:0] io_rawOut_sExp_0; // @[MulRecFN.scala:75:7]
wire [26:0] io_rawOut_sig_0; // @[MulRecFN.scala:75:7]
wire io_invalidExc_0; // @[MulRecFN.scala:75:7]
wire [25:0] _io_rawOut_sig_T = _mulFullRaw_io_rawOut_sig[47:22]; // @[MulRecFN.scala:84:28, :93:15]
wire [21:0] _io_rawOut_sig_T_1 = _mulFullRaw_io_rawOut_sig[21:0]; // @[MulRecFN.scala:84:28, :93:37]
wire _io_rawOut_sig_T_2 = |_io_rawOut_sig_T_1; // @[MulRecFN.scala:93:{37,55}]
assign _io_rawOut_sig_T_3 = {_io_rawOut_sig_T, _io_rawOut_sig_T_2}; // @[MulRecFN.scala:93:{10,15,55}]
assign io_rawOut_sig_0 = _io_rawOut_sig_T_3; // @[MulRecFN.scala:75:7, :93:10]
MulFullRawFN_41 mulFullRaw ( // @[MulRecFN.scala:84:28]
.io_a_isNaN (io_a_isNaN_0), // @[MulRecFN.scala:75:7]
.io_a_isInf (io_a_isInf_0), // @[MulRecFN.scala:75:7]
.io_a_isZero (io_a_isZero_0), // @[MulRecFN.scala:75:7]
.io_a_sign (io_a_sign_0), // @[MulRecFN.scala:75:7]
.io_a_sExp (io_a_sExp_0), // @[MulRecFN.scala:75:7]
.io_a_sig (io_a_sig_0), // @[MulRecFN.scala:75:7]
.io_b_isNaN (io_b_isNaN_0), // @[MulRecFN.scala:75:7]
.io_b_isInf (io_b_isInf_0), // @[MulRecFN.scala:75:7]
.io_b_isZero (io_b_isZero_0), // @[MulRecFN.scala:75:7]
.io_b_sign (io_b_sign_0), // @[MulRecFN.scala:75:7]
.io_b_sExp (io_b_sExp_0), // @[MulRecFN.scala:75:7]
.io_b_sig (io_b_sig_0), // @[MulRecFN.scala:75:7]
.io_invalidExc (io_invalidExc_0),
.io_rawOut_isNaN (io_rawOut_isNaN_0),
.io_rawOut_isInf (io_rawOut_isInf_0),
.io_rawOut_isZero (io_rawOut_isZero_0),
.io_rawOut_sign (io_rawOut_sign_0),
.io_rawOut_sExp (io_rawOut_sExp_0),
.io_rawOut_sig (_mulFullRaw_io_rawOut_sig)
); // @[MulRecFN.scala:84:28]
assign io_invalidExc = io_invalidExc_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_isNaN = io_rawOut_isNaN_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_isInf = io_rawOut_isInf_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_isZero = io_rawOut_isZero_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_sign = io_rawOut_sign_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_sExp = io_rawOut_sExp_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_sig = io_rawOut_sig_0; // @[MulRecFN.scala:75:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Serdes.scala:
package testchipip.serdes
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config._
/** Serializes an arbitrary Chisel type `t` into a stream of `flitWidth`-bit
  * flits.  The first flit is forwarded combinationally from `io.in`; the
  * remaining beats are replayed from the `data` register.
  */
class GenericSerializer[T <: Data](t: T, flitWidth: Int) extends Module {
override def desiredName = s"GenericSerializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(t))
val out = Decoupled(new Flit(flitWidth))
val busy = Output(Bool())
})
// Number of flit beats needed to carry one full message (ceiling division).
val dataBits = t.getWidth.max(flitWidth)
val dataBeats = (dataBits - 1) / flitWidth + 1
require(dataBeats >= 1)
val data = Reg(Vec(dataBeats, UInt(flitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
// Input is consumed on the first beat only; output stays valid until the
// last captured beat has drained.
io.in.ready := io.out.ready && beat === 0.U
io.out.valid := io.in.valid || beat =/= 0.U
// Beat 0 bypasses the register file; later beats come from `data`.
io.out.bits.flit := Mux(beat === 0.U, io.in.bits.asUInt, data(beat))
when (io.out.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) {
data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(flitWidth.W)))
data(0) := DontCare // unused, DCE this
}
}
io.busy := io.out.valid
}
/** Reassembles a stream of `flitWidth`-bit flits into a value of type `t`.
  * Only `dataBeats - 1` flits are buffered; the final flit is concatenated
  * combinationally, so `io.out` fires in the same cycle as the last flit.
  */
class GenericDeserializer[T <: Data](t: T, flitWidth: Int) extends Module {
override def desiredName = s"GenericDeserializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(t)
val busy = Output(Bool())
})
val dataBits = t.getWidth.max(flitWidth)
val dataBeats = (dataBits - 1) / flitWidth + 1
require(dataBeats >= 1)
// Buffer for all beats except the last one.
val data = Reg(Vec(dataBeats-1, UInt(flitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
// Non-final beats are always accepted; the final beat waits on `out`.
io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
io.out.valid := io.in.valid && beat === (dataBeats-1).U
io.out.bits := (if (dataBeats == 1) {
io.in.bits.flit.asTypeOf(t)
} else {
// Last flit arrives in the high-order position, above the buffered beats.
Cat(io.in.bits.flit, data.asUInt).asTypeOf(t)
})
when (io.in.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
if (dataBeats > 1) {
when (beat =/= (dataBeats-1).U) {
data(beat(log2Ceil(dataBeats-1)-1,0)) := io.in.bits.flit
}
}
}
// Busy whenever a partial message is buffered.
io.busy := beat =/= 0.U
}
/** Splits one `flitWidth`-bit flit into `dataBeats` phits of `phitWidth`
  * bits each.  Requires `flitWidth >= phitWidth`.  The first phit is
  * forwarded combinationally; the rest are replayed from `data`.
  */
class FlitToPhit(flitWidth: Int, phitWidth: Int) extends Module {
override def desiredName = s"FlitToPhit_f${flitWidth}_p${phitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Phit(phitWidth))
})
require(flitWidth >= phitWidth)
val dataBeats = (flitWidth - 1) / phitWidth + 1
// Buffers beats 1..dataBeats-1 (beat 0 is taken straight from the input).
val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready && beat === 0.U
io.out.valid := io.in.valid || beat =/= 0.U
io.out.bits.phit := (if (dataBeats == 1) io.in.bits.flit else Mux(beat === 0.U, io.in.bits.flit, data(beat-1.U)))
when (io.out.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) {
// Capture all but the first phit-sized slice for later beats.
data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(phitWidth.W))).tail
}
}
}
/** Convenience factory: wraps a decoupled flit stream in a [[FlitToPhit]]
  * and returns the resulting decoupled phit stream.
  */
object FlitToPhit {
def apply(flit: DecoupledIO[Flit], phitWidth: Int): DecoupledIO[Phit] = {
val flit2phit = Module(new FlitToPhit(flit.bits.flitWidth, phitWidth))
flit2phit.io.in <> flit
flit2phit.io.out
}
}
/** Reassembles `dataBeats` phits into one flit (inverse of [[FlitToPhit]]).
  * The final phit is concatenated combinationally so the flit fires in the
  * same cycle the last phit arrives.
  */
class PhitToFlit(flitWidth: Int, phitWidth: Int) extends Module {
override def desiredName = s"PhitToFlit_p${phitWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Phit(phitWidth)))
val out = Decoupled(new Flit(flitWidth))
})
require(flitWidth >= phitWidth)
val dataBeats = (flitWidth - 1) / phitWidth + 1
// Buffer for all phits except the last one.
val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
io.out.valid := io.in.valid && beat === (dataBeats-1).U
io.out.bits.flit := (if (dataBeats == 1) io.in.bits.phit else Cat(io.in.bits.phit, data.asUInt))
when (io.in.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
if (dataBeats > 1) {
when (beat =/= (dataBeats-1).U) {
data(beat) := io.in.bits.phit
}
}
}
}
/** Factories for [[PhitToFlit]].  The `ValidIO` variant has no backpressure,
  * so it asserts that the converter can always accept an incoming phit.
  */
object PhitToFlit {
def apply(phit: DecoupledIO[Phit], flitWidth: Int): DecoupledIO[Flit] = {
val phit2flit = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
phit2flit.io.in <> phit
phit2flit.io.out
}
def apply(phit: ValidIO[Phit], flitWidth: Int): ValidIO[Flit] = {
val phit2flit = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
phit2flit.io.in.valid := phit.valid
phit2flit.io.in.bits := phit.bits
// Valid interfaces cannot stall, so the converter must never backpressure.
when (phit.valid) { assert(phit2flit.io.in.ready) }
val out = Wire(Valid(new Flit(flitWidth)))
out.valid := phit2flit.io.out.valid
out.bits := phit2flit.io.out.bits
phit2flit.io.out.ready := true.B
out
}
}
/** Priority-arbitrates `channels` phit streams onto one output.  Each
  * granted flit is prefixed with a header of `headerBeats` phits encoding
  * the winning channel index; the chosen channel is latched for the whole
  * header+payload transfer.  Degenerates to a wire for a single channel.
  */
class PhitArbiter(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
override def desiredName = s"PhitArbiter_p${phitWidth}_f${flitWidth}_n${channels}"
val io = IO(new Bundle {
val in = Flipped(Vec(channels, Decoupled(new Phit(phitWidth))))
val out = Decoupled(new Phit(phitWidth))
})
if (channels == 1) {
io.out <> io.in(0)
} else {
val headerWidth = log2Ceil(channels)
val headerBeats = (headerWidth - 1) / phitWidth + 1
val flitBeats = (flitWidth - 1) / phitWidth + 1
val beats = headerBeats + flitBeats
val beat = RegInit(0.U(log2Ceil(beats).W))
// Winner chosen by priority on beat 0, then held in `chosen_reg`.
val chosen_reg = Reg(UInt(headerWidth.W))
val chosen_prio = PriorityEncoder(io.in.map(_.valid))
val chosen = Mux(beat === 0.U, chosen_prio, chosen_reg)
val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
io.out.valid := VecInit(io.in.map(_.valid))(chosen)
// Header phits carry the channel index; payload phits forward the winner.
io.out.bits.phit := Mux(beat < headerBeats.U,
chosen.asTypeOf(Vec(headerBeats, UInt(phitWidth.W)))(header_idx),
VecInit(io.in.map(_.bits.phit))(chosen))
for (i <- 0 until channels) {
// Inputs are only consumed during payload beats, by the latched winner.
io.in(i).ready := io.out.ready && beat >= headerBeats.U && chosen_reg === i.U
}
when (io.out.fire) {
beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) { chosen_reg := chosen_prio }
}
}
}
/** Inverse of [[PhitArbiter]]: consumes the channel-index header phits,
  * then steers the payload phits to the selected output channel.
  * Degenerates to a wire for a single channel.
  */
class PhitDemux(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
override def desiredName = s"PhitDemux_p${phitWidth}_f${flitWidth}_n${channels}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Phit(phitWidth)))
val out = Vec(channels, Decoupled(new Phit(phitWidth)))
})
if (channels == 1) {
io.out(0) <> io.in
} else {
val headerWidth = log2Ceil(channels)
val headerBeats = (headerWidth - 1) / phitWidth + 1
val flitBeats = (flitWidth - 1) / phitWidth + 1
val beats = headerBeats + flitBeats
val beat = RegInit(0.U(log2Ceil(beats).W))
// Header phits accumulate here; low bits decode the destination channel.
val channel_vec = Reg(Vec(headerBeats, UInt(phitWidth.W)))
val channel = channel_vec.asUInt(log2Ceil(channels)-1,0)
val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
// Header beats are always accepted; payload beats need the target ready.
io.in.ready := beat < headerBeats.U || VecInit(io.out.map(_.ready))(channel)
for (c <- 0 until channels) {
io.out(c).valid := io.in.valid && beat >= headerBeats.U && channel === c.U
io.out(c).bits.phit := io.in.bits.phit
}
when (io.in.fire) {
beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
when (beat < headerBeats.U) {
channel_vec(header_idx) := io.in.bits.phit
}
}
}
}
/** Converts a ready/valid flit stream into a credited stream.  `credits`
  * counts flits in flight toward the receiver's `bufferSz`-entry buffer;
  * sending increments it and returned credit flits (carrying a count) drain
  * it.  Sending stalls when the remote buffer would overflow.
  */
class DecoupledFlitToCreditedFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"DecoupledFlitToCreditedFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Flipped(Decoupled(new Flit(flitWidth)))
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val credits = RegInit(0.U((creditWidth+1).W))
val credit_incr = io.out.fire
val credit_decr = io.credit.fire
when (credit_incr || credit_decr) {
// A credit flit returns `flit + 1` credits at once (the +1 encodes that
// a count of zero is never sent).
credits := credits + credit_incr - Mux(io.credit.valid, io.credit.bits.flit +& 1.U, 0.U)
}
io.out.valid := io.in.valid && credits < bufferSz.U
io.out.bits.flit := io.in.bits.flit
io.in.ready := io.out.ready && credits < bufferSz.U
// Credit returns are always accepted.
io.credit.ready := true.B
}
/** Receiver side of the credited link: buffers incoming flits (which the
  * sender guarantees will fit, hence the assertion instead of backpressure)
  * and returns accumulated credits as flits encoding `count - 1`.
  */
class CreditedFlitToDecoupledFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"CreditedFlitToDecoupledFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Decoupled(new Flit(flitWidth))
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val buffer = Module(new Queue(new Flit(flitWidth), bufferSz))
val credits = RegInit(0.U((creditWidth+1).W))
val credit_incr = buffer.io.deq.fire
val credit_decr = io.credit.fire
when (credit_incr || credit_decr) {
// Dequeue earns one credit; a sent credit flit flushes the whole count.
credits := credit_incr + Mux(credit_decr, 0.U, credits)
}
buffer.io.enq.valid := io.in.valid
buffer.io.enq.bits := io.in.bits
// The credit protocol guarantees space, so never backpressure the sender.
io.in.ready := true.B
when (io.in.valid) { assert(buffer.io.enq.ready) }
io.out <> buffer.io.deq
io.credit.valid := credits =/= 0.U
// Encoded as count-1 (the sender adds the 1 back); zero is never sent.
io.credit.bits.flit := credits - 1.U
}
| module GenericDeserializer_TLBeatw10_f32_TestHarness_UNIQUIFIED( // @[Serdes.scala:37:7]
output io_in_ready, // @[Serdes.scala:39:14]
input io_in_valid, // @[Serdes.scala:39:14]
input [31:0] io_in_bits_flit, // @[Serdes.scala:39:14]
input io_out_ready, // @[Serdes.scala:39:14]
output io_out_valid, // @[Serdes.scala:39:14]
output io_out_bits_head, // @[Serdes.scala:39:14]
output io_out_bits_tail // @[Serdes.scala:39:14]
);
// Generated from GenericDeserializer with dataBeats == 1: the whole beat
// fits in one 32-bit flit, so the module reduces to pass-through wiring
// that extracts the head/tail bits from the flit.
assign io_in_ready = io_out_ready; // @[Serdes.scala:37:7]
assign io_out_valid = io_in_valid; // @[Serdes.scala:37:7]
assign io_out_bits_head = io_in_bits_flit[1]; // @[Serdes.scala:37:7, :54:29]
assign io_out_bits_tail = io_in_bits_flit[0]; // @[Serdes.scala:37:7, :54:29]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
/** Config key selecting the monitor implementation attached to TL edges. */
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
/** Diplomacy node implementation for the synchronous TileLink protocol:
  * defines how edges, bundles, monitors, and rendered graph labels are
  * derived from master/slave port parameters.
  */
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
// mixO/mixI record this node on the nodePath of every client/manager.
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
// Standard diplomacy node flavours specialized to TileLink (TLImp).
/** Pure master endpoint: only emits requests. */
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
/** Pure slave endpoint: only services requests. */
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
/** 1:1 node that may transform parameters flowing in each direction. */
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
/** Grouped N:M node transforming whole sequences of port parameters. */
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
/** Pass-through node, useful for naming points in the graph. */
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
/** Node that disappears from the final graph (used during construction). */
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
/** N:M crossbar-style node collapsing many ports into unified parameters. */
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
/** Base class for nodes with fully custom resolution logic. */
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings: TileLink carried through AsyncQueues between
// unrelated clock domains.
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
/** Entry into the async domain (TL -> TLAsync). */
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
/** Exit from the async domain (TLAsync -> TL). */
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings: clock domains whose frequencies have a
// known rational relationship.
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
/** Entry into the rational domain (TL -> TLRational). */
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
/** Exit from the rational domain (TLRational -> TL). */
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels: flow control by credit counting
// instead of combinational ready/valid.
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
/** Entry into the credited domain (TL -> TLCredited). */
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
/** Exit from the credited domain (TLCredited -> TL). */
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
// Re-raise with context so the failing LazyModule is identifiable.
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
// Prefix the dangle name with this wrapper so it stays unique upward.
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
// Honor the inline request by annotating this module for FIRRTL inlining.
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* Uses the implicit clock/reset of a regular chisel [[Module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* Unlike [[LazyModuleImp]] it has no implicit clock/reset; `childClock` and
* `childReset` wires are provided for children instead.
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled: clock tied low, reset undriven.
childClock := false.B.asClock
childReset := chisel3.DontCare
/** Override to true to elaborate children under childClock/childReset. */
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File Parameters.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property.cover
import scala.math.{min,max}
/** Static geometry of the cache.
 *
 * @param level cache level reported to software (e.g. 2 for an L2)
 * @param ways associativity; must be > 0
 * @param sets number of sets; must be > 0 (power of two checked elsewhere)
 * @param blockBytes cache block (line) size in bytes; power of two
 * @param beatBytes inner TileLink beat size in bytes; power of two, <= blockBytes
 * @param hintsSkipProbe whether a Hint may skip probing the client that issued it
 */
case class CacheParameters(
  level: Int,
  ways: Int,
  sets: Int,
  blockBytes: Int,
  beatBytes: Int, // inner
  hintsSkipProbe: Boolean)
{
  require (ways > 0)
  require (sets > 0)
  require (blockBytes > 0 && isPow2(blockBytes))
  require (beatBytes > 0 && isPow2(beatBytes))
  require (blockBytes >= beatBytes)
  val blocks = ways * sets // total number of cache blocks
  val sizeBytes = blocks * blockBytes // total data capacity in bytes
  val blockBeats = blockBytes/beatBytes // inner beats per block
}
/** Buffering applied to each of the five TileLink channels (A-E) of one cache port. */
case class InclusiveCachePortParameters(
  a: BufferParams,
  b: BufferParams,
  c: BufferParams,
  d: BufferParams,
  e: BufferParams)
{
  /** Materialize a [[TLBuffer]] configured with these per-channel parameters. */
  def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e))
}
/** Preset channel-buffering configurations for the cache's inner and outer ports. */
object InclusiveCachePortParameters
{
  /** No buffering on any channel. */
  val none = InclusiveCachePortParameters(
    a = BufferParams.none,
    b = BufferParams.none,
    c = BufferParams.none,
    d = BufferParams.none,
    e = BufferParams.none)
  /** Default (full) buffering on every channel. */
  val full = InclusiveCachePortParameters(
    a = BufferParams.default,
    b = BufferParams.default,
    c = BufferParams.default,
    d = BufferParams.default,
    e = BufferParams.default)
  // This removes feed-through paths from C=>A and A=>C
  val fullC = InclusiveCachePortParameters(
    a = BufferParams.none,
    b = BufferParams.none,
    c = BufferParams.default,
    d = BufferParams.none,
    e = BufferParams.none)
  /** Flow-through (zero-latency) buffering on A and D only. */
  val flowAD = InclusiveCachePortParameters(
    a = BufferParams.flow,
    b = BufferParams.none,
    c = BufferParams.none,
    d = BufferParams.flow,
    e = BufferParams.none)
  /** Flow-through (zero-latency) buffering on A and E only. */
  val flowAE = InclusiveCachePortParameters(
    a = BufferParams.flow,
    b = BufferParams.none,
    c = BufferParams.none,
    d = BufferParams.none,
    e = BufferParams.flow)
  // For innerBuf:
  // SinkA: no restrictions, flows into scheduler+putbuffer
  // SourceB: no restrictions, flows out of scheduler
  // sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore
  // SourceD: no restrictions, flows out of bankedStore/regout
  // SinkE: no restrictions, flows into scheduler
  //
  // ... so while none is possible, you probably want at least flowAC to cut ready
  // from the scheduler delay and flowD to ease SourceD back-pressure
  // For outerBufer:
  // SourceA: must not be pipe, flows out of scheduler
  // SinkB: no restrictions, flows into scheduler
  // SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored
  // SinkD: no restrictions, flows into scheduler & bankedStore
  // SourceE: must not be pipe, flows out of scheduler
  //
  // ... AE take the channel ready into the scheduler, so you need at least flowAE
}
/** Microarchitectural tuning knobs for the inclusive cache.
 *
 * @param writeBytes backing store update granularity (bytes); power of two
 * @param memCycles L2 clock cycles for one memory round-trip; sizes MSHRs/put lists
 * @param portFactor sub-bank provisioning: numSubBanks = (widest TL port * portFactor) / writeBytes
 * @param dirReg whether to register the directory output (extra latency for timing)
 * @param innerBuf channel buffering on the inner (client-side) port
 * @param outerBuf channel buffering on the outer (memory-side) port
 */
case class InclusiveCacheMicroParameters(
  writeBytes: Int, // backing store update granularity
  memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz)
  portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes
  dirReg: Boolean = false,
  innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none
  outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE
{
  require (writeBytes > 0 && isPow2(writeBytes))
  require (memCycles > 0)
  require (portFactor >= 2) // for inner RMW and concurrent outer Relase + Grant
}
/** Memory-mapped control interface configuration.
 *
 * @param address base address of the control register block
 * @param beatBytes width (bytes) of the control TileLink port
 * @param bankedControl whether to instantiate one controller per coherence bank
 */
case class InclusiveCacheControlParameters(
  address: BigInt,
  beatBytes: Int,
  bankedControl: Boolean)
/** Parameters derived for one bank of the cache from its geometry, the
 * microarchitecture knobs, and the diplomatically negotiated inner/outer edges.
 * Also provides address compression/expansion helpers used by the scheduler.
 */
case class InclusiveCacheParameters(
  cache: CacheParameters,
  micro: InclusiveCacheMicroParameters,
  control: Boolean,
  inner: TLEdgeIn,
  outer: TLEdgeOut)(implicit val p: Parameters)
{
  require (cache.ways > 1)
  require (cache.sets > 1 && isPow2(cache.sets))
  require (micro.writeBytes <= inner.manager.beatBytes)
  require (micro.writeBytes <= outer.manager.beatBytes)
  require (inner.manager.beatBytes <= cache.blockBytes)
  require (outer.manager.beatBytes <= cache.blockBytes)
  // Require that all cached address ranges have contiguous blocks
  outer.manager.managers.flatMap(_.address).foreach { a =>
    require (a.alignment >= cache.blockBytes)
  }
  // If we are the first level cache, we do not need to support inner-BCE
  val firstLevel = !inner.client.clients.exists(_.supports.probe)
  // If we are the last level cache, we do not need to support outer-B
  val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED)
  require (lastLevel)
  // Provision enough resources to achieve full throughput with missing single-beat accesses
  val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro)
  val secondary = max(mshrs, micro.memCycles - mshrs)
  val putLists = micro.memCycles // allow every request to be single beat
  val putBeats = max(2*cache.blockBeats, micro.memCycles)
  val relLists = 2
  val relBeats = relLists*cache.blockBeats
  // Compress the outer address space: pickMask selects only the address bits
  // that actually distinguish cacheable locations.
  val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address))
  val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_))
  // List the bit positions set in x, lowest first (tail-recursive).
  def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] =
    if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail)
  val addressMapping = bitOffsets(pickMask)
  val addressBits = addressMapping.size
  // println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}")
  val allClients = inner.client.clients.size
  val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size
  val clientBits = max(1, clientBitsRaw) // at least 1 so vectors are never zero-width
  val stateBits = 2
  val wayBits = log2Ceil(cache.ways)
  val setBits = log2Ceil(cache.sets)
  val offsetBits = log2Ceil(cache.blockBytes)
  val tagBits = addressBits - setBits - offsetBits
  val putBits = log2Ceil(max(putLists, relLists))
  require (tagBits > 0)
  require (offsetBits > 0)
  val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1
  val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1
  val innerMaskBits = inner.manager.beatBytes / micro.writeBytes
  val outerMaskBits = outer.manager.beatBytes / micro.writeBytes
  /** One-hot vector (over probe-capable clients) of which client owns `source`. */
  def clientBit(source: UInt): UInt = {
    if (clientBitsRaw == 0) {
      0.U
    } else {
      Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse)
    }
  }
  /** Inverse of [[clientBit]]: the first sourceId of the one-hot-selected client. */
  def clientSource(bit: UInt): UInt = {
    if (clientBitsRaw == 0) {
      0.U
    } else {
      Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U))
    }
  }
  /** Split a full address into (tag, set, offset) after compressing via addressMapping. */
  def parseAddress(x: UInt): (UInt, UInt, UInt) = {
    val offset = Cat(addressMapping.map(o => x(o,o)).reverse)
    val set = offset >> offsetBits
    val tag = set >> setBits
    (tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0))
  }
  /** Zero-extend x to `width` bits, asserting no set bits are lost. */
  def widen(x: UInt, width: Int): UInt = {
    val y = x | 0.U(width.W)
    assert (y >> width === 0.U)
    y(width-1, 0)
  }
  /** Scatter (tag, set, offset) back to the full outer address bit positions. */
  def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = {
    val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits))
    val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) }
    addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) }
    Cat(bits.reverse)
  }
  /** Re-insert the constant address bits dropped by pickMask, chosen by region match. */
  def restoreAddress(expanded: UInt): UInt = {
    val missingBits = flatAddresses
      .map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match
      .groupBy(_._1)
      .view
      .mapValues(_.map(_._2))
    val muxMask = AddressDecoder(missingBits.values.toList)
    val mux = missingBits.toList.map { case (bits, addrs) =>
      val widen = addrs.map(_.widen(~muxMask))
      val matches = AddressSet
        .unify(widen.distinct)
        .map(_.contains(expanded))
        .reduce(_ || _)
      (matches, bits.U)
    }
    expanded | Mux1H(mux)
  }
  /** Optionally register x, per micro.dirReg (directory output pipelining). */
  def dirReg[T <: Data](x: T, en: Bool = true.B): T = {
    if (micro.dirReg) RegEnable(x, en) else x
  }
  /** Coverage point, labeled with the cache level. */
  def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
    cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc)
}
/** 2-bit directory coherence-state encoding for a cached block, plus helpers
 * that classify incoming TileLink requests against it.
 */
object MetaData
{
  val stateBits = 2
  def INVALID: UInt = 0.U(stateBits.W) // way is empty
  def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk
  def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk
  def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch
  // Does a request need trunk?
  // opcode(2) is clear for the writing opcodes; prefetch-write and
  // non-NtoB Acquires also demand write (trunk) permission.
  def needT(opcode: UInt, param: UInt): Bool = {
    !opcode(2) ||
    (opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) ||
    ((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB)
  }
  // Does a request prove the client need not be probed?
  def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
    // Acquire(toB) and Get => is N, so no probe
    // Acquire(*toT) => is N or B, but need T, so no probe
    // Hint => could be anything, so probe IS needed, if hintsSkipProbe is enabled, skip probe the same client
    // Put* => is N or B, so probe IS needed
    opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B)
  }
  /** True if the C-channel shrink param ends with no permissions (…toN). */
  def isToN(param: UInt): Bool = {
    param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
  }
  /** True if the C-channel shrink param ends with branch permissions (…toB). */
  def isToB(param: UInt): Bool = {
    param === TLPermissions.TtoB || param === TLPermissions.BtoB
  }
}
/** Global constants and MSHR-count sizing helpers for the inclusive cache. */
object InclusiveCacheParameters
{
  val lfsrBits = 10
  val L2ControlAddress = 0x2010000
  val L2ControlSize = 0x1000
  /** Number of outer (A-channel) MSHRs required for full throughput. */
  def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
    // We need 2-3 normal MSHRs to cover the Directory latency
    // To fully exploit memory bandwidth-delay-product, we need memCyles/blockBeats MSHRs
    val directoryFloor = if (micro.dirReg) 3 else 2
    val bandwidthDelay = (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats // ceil division
    directoryFloor max bandwidthDelay
  }
  /** Total MSHRs: the outer ones plus one dedicated MSHR each for B and C. */
  def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
    out_mshrs(cache, micro) + 2
}
class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
File InclusiveCache.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.subsystem.{SubsystemBankedCoherenceKey}
import freechips.rocketchip.regmapper._
import freechips.rocketchip.tilelink._
/** Top-level inclusive L2 cache: one bank scheduler per (inner, outer) edge pair,
 * an optional memory-mapped control interface for flushes, and a device-tree
 * description of the cache's geometry.
 */
class InclusiveCache(
  val cache: CacheParameters,
  val micro: InclusiveCacheMicroParameters,
  control: Option[InclusiveCacheControlParameters] = None
  )(implicit p: Parameters)
    extends LazyModule
{
  val access = TransferSizes(1, cache.blockBytes) // sizes accepted for non-block ops
  val xfer = TransferSizes(cache.blockBytes, cache.blockBytes) // full-block transfers only
  val atom = TransferSizes(1, cache.beatBytes) // atomic ops up to one beat
  var resourcesOpt: Option[ResourceBindings] = None
  // Device-tree node advertising the cache's geometry and any next-level caches.
  val device: SimpleDevice = new SimpleDevice("cache-controller", Seq("sifive,inclusivecache0", "cache")) {
    def ofInt(x: Int) = Seq(ResourceInt(BigInt(x)))
    override def describe(resources: ResourceBindings): Description = {
      resourcesOpt = Some(resources)
      val Description(name, mapping) = super.describe(resources)
      // Find the outer caches
      val outer = node.edges.out
        .flatMap(_.manager.managers)
        .filter(_.supportsAcquireB)
        .flatMap(_.resources.headOption)
        .map(_.owner.label)
        .distinct
      val nextlevel: Option[(String, Seq[ResourceValue])] =
        if (outer.isEmpty) {
          None
        } else {
          Some("next-level-cache" -> outer.map(l => ResourceReference(l)).toList)
        }
      val extra = Map(
        "cache-level" -> ofInt(2),
        "cache-unified" -> Nil,
        "cache-size" -> ofInt(cache.sizeBytes * node.edges.in.size),
        "cache-sets" -> ofInt(cache.sets * node.edges.in.size),
        "cache-block-size" -> ofInt(cache.blockBytes),
        "sifive,mshr-count" -> ofInt(InclusiveCacheParameters.all_mshrs(cache, micro)))
      Description(name, mapping ++ extra ++ nextlevel)
    }
  }
  // Adapter node: to outer managers we are a client with one sourceId per outer MSHR;
  // to inner clients we present the managers as CACHED regions with upgraded support.
  val node: TLAdapterNode = TLAdapterNode(
    clientFn = { _ => TLClientPortParameters(Seq(TLClientParameters(
      name = s"L${cache.level} InclusiveCache",
      sourceId = IdRange(0, InclusiveCacheParameters.out_mshrs(cache, micro)),
      supportsProbe = xfer)))
    },
    managerFn = { m => TLManagerPortParameters(
      managers = m.managers.map { m => m.copy(
        regionType = if (m.regionType >= RegionType.UNCACHED) RegionType.CACHED else m.regionType,
        resources = Resource(device, "caches") +: m.resources,
        supportsAcquireB = xfer,
        supportsAcquireT = if (m.supportsAcquireT) xfer else TransferSizes.none,
        supportsArithmetic = if (m.supportsAcquireT) atom else TransferSizes.none,
        supportsLogical = if (m.supportsAcquireT) atom else TransferSizes.none,
        supportsGet = access,
        supportsPutFull = if (m.supportsAcquireT) access else TransferSizes.none,
        supportsPutPartial = if (m.supportsAcquireT) access else TransferSizes.none,
        supportsHint = access,
        alwaysGrantsT = false,
        fifoId = None)
      },
      beatBytes = cache.beatBytes,
      endSinkId = InclusiveCacheParameters.all_mshrs(cache, micro),
      minLatency = 2)
    })
  // Optional flush controllers: one per bank when bankedControl, else a single one.
  val ctrls = control.map { c =>
    val nCtrls = if (c.bankedControl) p(SubsystemBankedCoherenceKey).nBanks else 1
    Seq.tabulate(nCtrls) { i => LazyModule(new InclusiveCacheControl(this,
      c.copy(address = c.address + i * InclusiveCacheParameters.L2ControlSize))) }
  }.getOrElse(Nil)
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) {
    // If you have a control port, you must have at least one cache port
    require (ctrls.isEmpty || !node.edges.in.isEmpty)
    // Extract the client IdRanges; must be the same on all ports!
    val clientIds = node.edges.in.headOption.map(_.client.clients.map(_.sourceId).sortBy(_.start))
    node.edges.in.foreach { e => require(e.client.clients.map(_.sourceId).sortBy(_.start) == clientIds.get) }
    // Use the natural ordering of clients (just like in Directory)
    node.edges.in.headOption.foreach { n =>
      println(s"L${cache.level} InclusiveCache Client Map:")
      n.client.clients.zipWithIndex.foreach { case (c,i) =>
        println(s"\t${i} <= ${c.name}")
      }
      println("")
    }
    // Create the L2 Banks
    val mods = (node.in zip node.out) map { case ((in, edgeIn), (out, edgeOut)) =>
      edgeOut.manager.managers.foreach { m =>
        require (m.supportsAcquireB.contains(xfer),
          s"All managers behind the L2 must support acquireB($xfer) " +
          s"but ${m.name} only supports (${m.supportsAcquireB})!")
        if (m.supportsAcquireT) require (m.supportsAcquireT.contains(xfer),
          s"Any probing managers behind the L2 must support acquireT($xfer) " +
          s"but ${m.name} only supports (${m.supportsAcquireT})!")
      }
      val params = InclusiveCacheParameters(cache, micro, !ctrls.isEmpty, edgeIn, edgeOut)
      val scheduler = Module(new InclusiveCacheBankScheduler(params)).suggestName("inclusive_cache_bank_sched")
      scheduler.io.in <> in
      out <> scheduler.io.out
      scheduler.io.ways := DontCare
      scheduler.io.divs := DontCare
      // Tie down default values in case there is no controller
      scheduler.io.req.valid := false.B
      scheduler.io.req.bits.address := 0.U
      scheduler.io.resp.ready := true.B
      // Fix-up the missing addresses. We do this here so that the Scheduler can be
      // deduplicated by Firrtl to make hierarchical place-and-route easier.
      out.a.bits.address := params.restoreAddress(scheduler.io.out.a.bits.address)
      in .b.bits.address := params.restoreAddress(scheduler.io.in .b.bits.address)
      out.c.bits.address := params.restoreAddress(scheduler.io.out.c.bits.address)
      scheduler
    }
    // Default the controller-side flush handshake to idle; banks drive it below.
    ctrls.foreach { ctrl =>
      ctrl.module.io.flush_req.ready := false.B
      ctrl.module.io.flush_resp := false.B
      ctrl.module.io.flush_match := false.B
    }
    // Route each controller's flush request to the bank whose address range contains it.
    mods.zip(node.edges.in).zipWithIndex.foreach { case ((sched, edgeIn), i) =>
      val ctrl = if (ctrls.size > 1) Some(ctrls(i)) else ctrls.headOption
      ctrl.foreach { ctrl => {
        val contained = edgeIn.manager.managers.flatMap(_.address)
          .map(_.contains(ctrl.module.io.flush_req.bits)).reduce(_||_)
        when (contained) { ctrl.module.io.flush_match := true.B }
        sched.io.req.valid := contained && ctrl.module.io.flush_req.valid
        sched.io.req.bits.address := ctrl.module.io.flush_req.bits
        when (contained && sched.io.req.ready) { ctrl.module.io.flush_req.ready := true.B }
        when (sched.io.resp.valid) { ctrl.module.io.flush_resp := true.B }
        sched.io.resp.ready := true.B
      }}
    }
    /** JSON summary of all bank schedulers, for downstream tooling. */
    def json = s"""{"banks":[${mods.map(_.json).mkString(",")}]}"""
  }
}
| module InclusiveCache( // @[InclusiveCache.scala:108:9]
input clock, // @[InclusiveCache.scala:108:9]
input reset, // @[InclusiveCache.scala:108:9]
output auto_ctrls_ctrl_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_ctrls_ctrl_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_ctrls_ctrl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_ctrls_ctrl_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_ctrls_ctrl_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_ctrls_ctrl_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [25:0] auto_ctrls_ctrl_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_ctrls_ctrl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_ctrls_ctrl_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_ctrls_ctrl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_ctrls_ctrl_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_ctrls_ctrl_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_ctrls_ctrl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_ctrls_ctrl_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_ctrls_ctrl_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_ctrls_ctrl_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_3_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_3_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_3_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_3_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_3_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_3_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_3_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_3_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_3_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_3_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_3_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_3_b_valid, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_3_b_bits_param, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_3_b_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_in_3_b_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_in_3_c_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_3_c_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_3_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_3_c_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_3_c_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_3_c_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_3_c_bits_address, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_3_c_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_3_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_3_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_3_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_3_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_3_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_3_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_3_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_3_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_3_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_3_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_3_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_3_e_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_3_e_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_2_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_2_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_2_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_2_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_2_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_2_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_2_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_2_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_2_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_2_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_2_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_2_b_valid, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_2_b_bits_param, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_2_b_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_in_2_b_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_in_2_c_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_2_c_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_2_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_2_c_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_2_c_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_2_c_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_2_c_bits_address, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_2_c_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_2_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_2_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_2_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_2_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_2_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_2_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_2_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_2_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_2_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_2_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_2_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_2_e_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_2_e_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_1_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_1_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_1_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_1_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_1_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_1_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_1_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_1_b_valid, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_1_b_bits_param, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_1_b_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_in_1_b_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_in_1_c_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_1_c_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_c_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_c_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_1_c_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_1_c_bits_address, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_1_c_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_1_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_1_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_1_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_1_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_1_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_1_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_1_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_1_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_1_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_1_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_1_e_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_e_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_0_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_0_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_0_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_0_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_0_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_0_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_0_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_0_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_0_b_valid, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_0_b_bits_param, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_0_b_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_in_0_b_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_in_0_c_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_0_c_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_c_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_c_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_0_c_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_0_c_bits_address, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_0_c_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_0_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_0_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_0_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_0_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_0_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_0_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_0_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_0_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_0_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_0_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_0_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_0_e_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_e_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_3_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_3_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_3_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_3_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_3_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_3_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_3_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_3_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_3_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_3_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_3_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_3_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_3_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_3_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_3_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_3_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_3_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_3_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_3_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_3_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_3_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_3_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_3_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_3_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_3_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_3_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_3_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_3_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_3_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_3_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_3_e_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_2_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_2_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_2_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_2_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_2_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_2_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_2_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_2_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_2_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_2_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_2_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_2_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_2_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_2_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_2_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_2_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_2_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_2_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_2_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_2_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_2_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_2_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_2_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_2_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_2_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_2_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_2_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_2_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_2_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_2_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_2_e_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_1_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_1_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_1_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_1_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_1_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_1_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_1_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_1_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_1_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_1_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_1_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_1_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_1_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_1_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_1_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_1_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_1_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_1_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_1_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_1_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_e_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_0_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_0_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_0_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_0_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_0_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_0_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_0_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_0_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_0_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_0_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_0_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_0_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_0_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_0_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_0_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_0_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_0_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_0_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_0_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_0_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_0_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_0_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_e_bits_sink // @[LazyModuleImp.scala:107:25]
);
  // -------------------------------------------------------------------------
  // Internal nets carrying each bank scheduler's sink-side (io_in_*) outputs
  // and, for banks 1-3, its master-side output addresses.  They are consumed
  // by the TLMonitor instances and by the auto_in_* port wiring below.
  // Bank 0 needs no io_out_*_bits_address nets: its output addresses drive
  // auto_out_0_* directly (bank-select bits for bank 0 are zero).
  // -------------------------------------------------------------------------
  // Bank 3 scheduler outputs.
  wire _inclusive_cache_bank_sched_3_io_in_a_ready; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_3_io_in_b_valid; // @[InclusiveCache.scala:137:29]
  wire [1:0] _inclusive_cache_bank_sched_3_io_in_b_bits_param; // @[InclusiveCache.scala:137:29]
  wire [5:0] _inclusive_cache_bank_sched_3_io_in_b_bits_source; // @[InclusiveCache.scala:137:29]
  wire [31:0] _inclusive_cache_bank_sched_3_io_in_b_bits_address; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_3_io_in_c_ready; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_3_io_in_d_valid; // @[InclusiveCache.scala:137:29]
  wire [2:0] _inclusive_cache_bank_sched_3_io_in_d_bits_opcode; // @[InclusiveCache.scala:137:29]
  wire [1:0] _inclusive_cache_bank_sched_3_io_in_d_bits_param; // @[InclusiveCache.scala:137:29]
  wire [2:0] _inclusive_cache_bank_sched_3_io_in_d_bits_size; // @[InclusiveCache.scala:137:29]
  wire [5:0] _inclusive_cache_bank_sched_3_io_in_d_bits_source; // @[InclusiveCache.scala:137:29]
  wire [2:0] _inclusive_cache_bank_sched_3_io_in_d_bits_sink; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_3_io_in_d_bits_denied; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_3_io_in_d_bits_corrupt; // @[InclusiveCache.scala:137:29]
  wire [31:0] _inclusive_cache_bank_sched_3_io_out_a_bits_address; // @[InclusiveCache.scala:137:29]
  wire [31:0] _inclusive_cache_bank_sched_3_io_out_c_bits_address; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_3_io_req_ready; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_3_io_resp_valid; // @[InclusiveCache.scala:137:29]
  // Bank 2 scheduler outputs.
  wire _inclusive_cache_bank_sched_2_io_in_a_ready; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_2_io_in_b_valid; // @[InclusiveCache.scala:137:29]
  wire [1:0] _inclusive_cache_bank_sched_2_io_in_b_bits_param; // @[InclusiveCache.scala:137:29]
  wire [5:0] _inclusive_cache_bank_sched_2_io_in_b_bits_source; // @[InclusiveCache.scala:137:29]
  wire [31:0] _inclusive_cache_bank_sched_2_io_in_b_bits_address; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_2_io_in_c_ready; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_2_io_in_d_valid; // @[InclusiveCache.scala:137:29]
  wire [2:0] _inclusive_cache_bank_sched_2_io_in_d_bits_opcode; // @[InclusiveCache.scala:137:29]
  wire [1:0] _inclusive_cache_bank_sched_2_io_in_d_bits_param; // @[InclusiveCache.scala:137:29]
  wire [2:0] _inclusive_cache_bank_sched_2_io_in_d_bits_size; // @[InclusiveCache.scala:137:29]
  wire [5:0] _inclusive_cache_bank_sched_2_io_in_d_bits_source; // @[InclusiveCache.scala:137:29]
  wire [2:0] _inclusive_cache_bank_sched_2_io_in_d_bits_sink; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_2_io_in_d_bits_denied; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_2_io_in_d_bits_corrupt; // @[InclusiveCache.scala:137:29]
  wire [31:0] _inclusive_cache_bank_sched_2_io_out_a_bits_address; // @[InclusiveCache.scala:137:29]
  wire [31:0] _inclusive_cache_bank_sched_2_io_out_c_bits_address; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_2_io_req_ready; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_2_io_resp_valid; // @[InclusiveCache.scala:137:29]
  // Bank 1 scheduler outputs.
  wire _inclusive_cache_bank_sched_1_io_in_a_ready; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_1_io_in_b_valid; // @[InclusiveCache.scala:137:29]
  wire [1:0] _inclusive_cache_bank_sched_1_io_in_b_bits_param; // @[InclusiveCache.scala:137:29]
  wire [5:0] _inclusive_cache_bank_sched_1_io_in_b_bits_source; // @[InclusiveCache.scala:137:29]
  wire [31:0] _inclusive_cache_bank_sched_1_io_in_b_bits_address; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_1_io_in_c_ready; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_1_io_in_d_valid; // @[InclusiveCache.scala:137:29]
  wire [2:0] _inclusive_cache_bank_sched_1_io_in_d_bits_opcode; // @[InclusiveCache.scala:137:29]
  wire [1:0] _inclusive_cache_bank_sched_1_io_in_d_bits_param; // @[InclusiveCache.scala:137:29]
  wire [2:0] _inclusive_cache_bank_sched_1_io_in_d_bits_size; // @[InclusiveCache.scala:137:29]
  wire [5:0] _inclusive_cache_bank_sched_1_io_in_d_bits_source; // @[InclusiveCache.scala:137:29]
  wire [2:0] _inclusive_cache_bank_sched_1_io_in_d_bits_sink; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_1_io_in_d_bits_denied; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_1_io_in_d_bits_corrupt; // @[InclusiveCache.scala:137:29]
  wire [31:0] _inclusive_cache_bank_sched_1_io_out_a_bits_address; // @[InclusiveCache.scala:137:29]
  wire [31:0] _inclusive_cache_bank_sched_1_io_out_c_bits_address; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_1_io_req_ready; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_1_io_resp_valid; // @[InclusiveCache.scala:137:29]
  // Bank 0 scheduler outputs (no out-address nets; see note above).
  wire _inclusive_cache_bank_sched_io_in_a_ready; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_io_in_b_valid; // @[InclusiveCache.scala:137:29]
  wire [1:0] _inclusive_cache_bank_sched_io_in_b_bits_param; // @[InclusiveCache.scala:137:29]
  wire [5:0] _inclusive_cache_bank_sched_io_in_b_bits_source; // @[InclusiveCache.scala:137:29]
  wire [31:0] _inclusive_cache_bank_sched_io_in_b_bits_address; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_io_in_c_ready; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_io_in_d_valid; // @[InclusiveCache.scala:137:29]
  wire [2:0] _inclusive_cache_bank_sched_io_in_d_bits_opcode; // @[InclusiveCache.scala:137:29]
  wire [1:0] _inclusive_cache_bank_sched_io_in_d_bits_param; // @[InclusiveCache.scala:137:29]
  wire [2:0] _inclusive_cache_bank_sched_io_in_d_bits_size; // @[InclusiveCache.scala:137:29]
  wire [5:0] _inclusive_cache_bank_sched_io_in_d_bits_source; // @[InclusiveCache.scala:137:29]
  wire [2:0] _inclusive_cache_bank_sched_io_in_d_bits_sink; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_io_in_d_bits_denied; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_io_in_d_bits_corrupt; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_io_req_ready; // @[InclusiveCache.scala:137:29]
  wire _inclusive_cache_bank_sched_io_resp_valid; // @[InclusiveCache.scala:137:29]
  // Flush-request interface driven by the control block (ctrls) below.
  wire _ctrls_io_flush_req_valid; // @[InclusiveCache.scala:103:43]
  wire [63:0] _ctrls_io_flush_req_bits; // @[InclusiveCache.scala:103:43]
  // -------------------------------------------------------------------------
  // B-channel address reconstruction: the bank schedulers operate on
  // bank-local addresses, so the bank-select bits (address[7:6] in this
  // 4-bank configuration) are OR-ed back in before the probe address is
  // presented upstream.  Bank 1 sets bit 6 (0x40), bank 2 sets bit 7 (0x80),
  // bank 3 sets bits 7:6 (0xC0); bank 0's select bits are zero, so its
  // address passes through unmodified.
  // -------------------------------------------------------------------------
  wire [31:0] nodeIn_1_b_bits_address = {_inclusive_cache_bank_sched_1_io_in_b_bits_address[31:7], _inclusive_cache_bank_sched_1_io_in_b_bits_address[6:0] | 7'h40}; // @[Parameters.scala:248:14]
  wire [31:0] nodeIn_2_b_bits_address = {_inclusive_cache_bank_sched_2_io_in_b_bits_address[31:8], _inclusive_cache_bank_sched_2_io_in_b_bits_address[7:0] | 8'h80}; // @[Parameters.scala:248:14]
  wire [31:0] nodeIn_3_b_bits_address = {_inclusive_cache_bank_sched_3_io_in_b_bits_address[31:8], _inclusive_cache_bank_sched_3_io_in_b_bits_address[7:0] | 8'hC0}; // @[Parameters.scala:248:14]
  // -------------------------------------------------------------------------
  // Flush-request routing predicates.  contained_N is true when the 64-bit
  // flush address from ctrls lies in one of the two regions baked into the
  // XOR constants (bases 0x8000_0000 and 0x0800_0000) AND its bank-select
  // bits [7:6] equal N.  These gate which bank scheduler receives the flush
  // request (io_req_valid) in the instantiations below.
  // -------------------------------------------------------------------------
  wire contained = {_ctrls_io_flush_req_bits[63:32], _ctrls_io_flush_req_bits[31:28] ^ 4'h8, _ctrls_io_flush_req_bits[7:6]} == 38'h0 | {_ctrls_io_flush_req_bits[63:28], _ctrls_io_flush_req_bits[27:16] ^ 12'h800, _ctrls_io_flush_req_bits[7:6]} == 50'h0; // @[Parameters.scala:137:{31,41,46,59}]
  wire [25:0] _GEN = _ctrls_io_flush_req_bits[31:6] ^ 26'h2000001; // @[Parameters.scala:137:31]
  wire [21:0] _GEN_0 = _ctrls_io_flush_req_bits[27:6] ^ 22'h200001; // @[Parameters.scala:137:31]
  wire contained_1 = {_ctrls_io_flush_req_bits[63:32], _GEN[25:22], _GEN[1:0]} == 38'h0 | {_ctrls_io_flush_req_bits[63:28], _GEN_0[21:10], _GEN_0[1:0]} == 50'h0; // @[Parameters.scala:137:{31,41,46,59}]
  wire [25:0] _GEN_1 = _ctrls_io_flush_req_bits[31:6] ^ 26'h2000002; // @[Parameters.scala:137:31]
  wire [21:0] _GEN_2 = _ctrls_io_flush_req_bits[27:6] ^ 22'h200002; // @[Parameters.scala:137:31]
  wire contained_2 = {_ctrls_io_flush_req_bits[63:32], _GEN_1[25:22], _GEN_1[1:0]} == 38'h0 | {_ctrls_io_flush_req_bits[63:28], _GEN_2[21:10], _GEN_2[1:0]} == 50'h0; // @[Parameters.scala:137:{31,41,46,59}]
  wire [25:0] _GEN_3 = _ctrls_io_flush_req_bits[31:6] ^ 26'h2000003; // @[Parameters.scala:137:31]
  wire [21:0] _GEN_4 = _ctrls_io_flush_req_bits[27:6] ^ 22'h200003; // @[Parameters.scala:137:31]
  wire contained_3 = {_ctrls_io_flush_req_bits[63:32], _GEN_3[25:22], _GEN_3[1:0]} == 38'h0 | {_ctrls_io_flush_req_bits[63:28], _GEN_4[21:10], _GEN_4[1:0]} == 50'h0; // @[Parameters.scala:137:{31,41,46,59}]
  // -------------------------------------------------------------------------
  // L2 control block: exposes the TileLink register interface (auto_ctrls_*)
  // and issues cache-flush requests.  The flush handshake is fanned out to
  // the four bank schedulers:
  //   - io_flush_match : OR of all contained_N (some bank owns the address),
  //   - io_flush_req_ready : ready of whichever bank matched,
  //   - io_flush_resp  : OR of all bank io_resp_valid outputs.
  // -------------------------------------------------------------------------
  InclusiveCacheControl ctrls ( // @[InclusiveCache.scala:103:43]
    .clock (clock),
    .reset (reset),
    .auto_ctrl_in_a_ready (auto_ctrls_ctrl_in_a_ready),
    .auto_ctrl_in_a_valid (auto_ctrls_ctrl_in_a_valid),
    .auto_ctrl_in_a_bits_opcode (auto_ctrls_ctrl_in_a_bits_opcode),
    .auto_ctrl_in_a_bits_param (auto_ctrls_ctrl_in_a_bits_param),
    .auto_ctrl_in_a_bits_size (auto_ctrls_ctrl_in_a_bits_size),
    .auto_ctrl_in_a_bits_source (auto_ctrls_ctrl_in_a_bits_source),
    .auto_ctrl_in_a_bits_address (auto_ctrls_ctrl_in_a_bits_address),
    .auto_ctrl_in_a_bits_mask (auto_ctrls_ctrl_in_a_bits_mask),
    .auto_ctrl_in_a_bits_data (auto_ctrls_ctrl_in_a_bits_data),
    .auto_ctrl_in_a_bits_corrupt (auto_ctrls_ctrl_in_a_bits_corrupt),
    .auto_ctrl_in_d_ready (auto_ctrls_ctrl_in_d_ready),
    .auto_ctrl_in_d_valid (auto_ctrls_ctrl_in_d_valid),
    .auto_ctrl_in_d_bits_opcode (auto_ctrls_ctrl_in_d_bits_opcode),
    .auto_ctrl_in_d_bits_size (auto_ctrls_ctrl_in_d_bits_size),
    .auto_ctrl_in_d_bits_source (auto_ctrls_ctrl_in_d_bits_source),
    .auto_ctrl_in_d_bits_data (auto_ctrls_ctrl_in_d_bits_data),
    .io_flush_match (contained_3 | contained_2 | contained_1 | contained), // @[InclusiveCache.scala:169:67, :170:{26,55}]
    .io_flush_req_ready (contained_3 & _inclusive_cache_bank_sched_3_io_req_ready | contained_2 & _inclusive_cache_bank_sched_2_io_req_ready | contained_1 & _inclusive_cache_bank_sched_1_io_req_ready | contained & _inclusive_cache_bank_sched_io_req_ready), // @[InclusiveCache.scala:137:29, :169:67, :174:{25,48,81}]
    .io_flush_req_valid (_ctrls_io_flush_req_valid),
    .io_flush_req_bits (_ctrls_io_flush_req_bits),
    .io_flush_resp (_inclusive_cache_bank_sched_3_io_resp_valid | _inclusive_cache_bank_sched_2_io_resp_valid | _inclusive_cache_bank_sched_1_io_resp_valid | _inclusive_cache_bank_sched_io_resp_valid) // @[InclusiveCache.scala:137:29, :176:{36,64}]
  ); // @[InclusiveCache.scala:103:43]
  // -------------------------------------------------------------------------
  // TileLink protocol monitor for bank 0's input port.  Purely observational:
  // no outputs of this instance are connected here; it watches the A/C/E
  // channels arriving on auto_in_0_* and the B/D channels produced by the
  // bank-0 scheduler.
  // -------------------------------------------------------------------------
  TLMonitor_46 monitor ( // @[Nodes.scala:27:25]
    .clock (clock),
    .reset (reset),
    .io_in_a_ready (_inclusive_cache_bank_sched_io_in_a_ready), // @[InclusiveCache.scala:137:29]
    .io_in_a_valid (auto_in_0_a_valid),
    .io_in_a_bits_opcode (auto_in_0_a_bits_opcode),
    .io_in_a_bits_param (auto_in_0_a_bits_param),
    .io_in_a_bits_size (auto_in_0_a_bits_size),
    .io_in_a_bits_source (auto_in_0_a_bits_source),
    .io_in_a_bits_address (auto_in_0_a_bits_address),
    .io_in_a_bits_mask (auto_in_0_a_bits_mask),
    .io_in_a_bits_corrupt (auto_in_0_a_bits_corrupt),
    .io_in_b_ready (auto_in_0_b_ready),
    .io_in_b_valid (_inclusive_cache_bank_sched_io_in_b_valid), // @[InclusiveCache.scala:137:29]
    .io_in_b_bits_param (_inclusive_cache_bank_sched_io_in_b_bits_param), // @[InclusiveCache.scala:137:29]
    .io_in_b_bits_source (_inclusive_cache_bank_sched_io_in_b_bits_source), // @[InclusiveCache.scala:137:29]
    // Bank 0 needs no address reconstruction (its bank-select bits are 0),
    // so the scheduler's B address is observed directly.
    .io_in_b_bits_address (_inclusive_cache_bank_sched_io_in_b_bits_address), // @[InclusiveCache.scala:137:29]
    .io_in_c_ready (_inclusive_cache_bank_sched_io_in_c_ready), // @[InclusiveCache.scala:137:29]
    .io_in_c_valid (auto_in_0_c_valid),
    .io_in_c_bits_opcode (auto_in_0_c_bits_opcode),
    .io_in_c_bits_param (auto_in_0_c_bits_param),
    .io_in_c_bits_size (auto_in_0_c_bits_size),
    .io_in_c_bits_source (auto_in_0_c_bits_source),
    .io_in_c_bits_address (auto_in_0_c_bits_address),
    .io_in_c_bits_corrupt (auto_in_0_c_bits_corrupt),
    .io_in_d_ready (auto_in_0_d_ready),
    .io_in_d_valid (_inclusive_cache_bank_sched_io_in_d_valid), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_opcode (_inclusive_cache_bank_sched_io_in_d_bits_opcode), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_param (_inclusive_cache_bank_sched_io_in_d_bits_param), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_size (_inclusive_cache_bank_sched_io_in_d_bits_size), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_source (_inclusive_cache_bank_sched_io_in_d_bits_source), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_sink (_inclusive_cache_bank_sched_io_in_d_bits_sink), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_denied (_inclusive_cache_bank_sched_io_in_d_bits_denied), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_corrupt (_inclusive_cache_bank_sched_io_in_d_bits_corrupt), // @[InclusiveCache.scala:137:29]
    .io_in_e_valid (auto_in_0_e_valid),
    .io_in_e_bits_sink (auto_in_0_e_bits_sink)
  ); // @[Nodes.scala:27:25]
  // -------------------------------------------------------------------------
  // TileLink protocol monitor for bank 1's input port (observe-only; no
  // outputs connected).  Note the B-channel address observed is the
  // reconstructed full address nodeIn_1_b_bits_address (bank bit 6 OR-ed in),
  // not the scheduler's bank-local address.
  // -------------------------------------------------------------------------
  TLMonitor_47 monitor_1 ( // @[Nodes.scala:27:25]
    .clock (clock),
    .reset (reset),
    .io_in_a_ready (_inclusive_cache_bank_sched_1_io_in_a_ready), // @[InclusiveCache.scala:137:29]
    .io_in_a_valid (auto_in_1_a_valid),
    .io_in_a_bits_opcode (auto_in_1_a_bits_opcode),
    .io_in_a_bits_param (auto_in_1_a_bits_param),
    .io_in_a_bits_size (auto_in_1_a_bits_size),
    .io_in_a_bits_source (auto_in_1_a_bits_source),
    .io_in_a_bits_address (auto_in_1_a_bits_address),
    .io_in_a_bits_mask (auto_in_1_a_bits_mask),
    .io_in_a_bits_corrupt (auto_in_1_a_bits_corrupt),
    .io_in_b_ready (auto_in_1_b_ready),
    .io_in_b_valid (_inclusive_cache_bank_sched_1_io_in_b_valid), // @[InclusiveCache.scala:137:29]
    .io_in_b_bits_param (_inclusive_cache_bank_sched_1_io_in_b_bits_param), // @[InclusiveCache.scala:137:29]
    .io_in_b_bits_source (_inclusive_cache_bank_sched_1_io_in_b_bits_source), // @[InclusiveCache.scala:137:29]
    .io_in_b_bits_address (nodeIn_1_b_bits_address), // @[Parameters.scala:248:14]
    .io_in_c_ready (_inclusive_cache_bank_sched_1_io_in_c_ready), // @[InclusiveCache.scala:137:29]
    .io_in_c_valid (auto_in_1_c_valid),
    .io_in_c_bits_opcode (auto_in_1_c_bits_opcode),
    .io_in_c_bits_param (auto_in_1_c_bits_param),
    .io_in_c_bits_size (auto_in_1_c_bits_size),
    .io_in_c_bits_source (auto_in_1_c_bits_source),
    .io_in_c_bits_address (auto_in_1_c_bits_address),
    .io_in_c_bits_corrupt (auto_in_1_c_bits_corrupt),
    .io_in_d_ready (auto_in_1_d_ready),
    .io_in_d_valid (_inclusive_cache_bank_sched_1_io_in_d_valid), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_opcode (_inclusive_cache_bank_sched_1_io_in_d_bits_opcode), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_param (_inclusive_cache_bank_sched_1_io_in_d_bits_param), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_size (_inclusive_cache_bank_sched_1_io_in_d_bits_size), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_source (_inclusive_cache_bank_sched_1_io_in_d_bits_source), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_sink (_inclusive_cache_bank_sched_1_io_in_d_bits_sink), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_denied (_inclusive_cache_bank_sched_1_io_in_d_bits_denied), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_corrupt (_inclusive_cache_bank_sched_1_io_in_d_bits_corrupt), // @[InclusiveCache.scala:137:29]
    .io_in_e_valid (auto_in_1_e_valid),
    .io_in_e_bits_sink (auto_in_1_e_bits_sink)
  ); // @[Nodes.scala:27:25]
  // -------------------------------------------------------------------------
  // TileLink protocol monitor for bank 2's input port (observe-only; no
  // outputs connected).  B-channel address observed is nodeIn_2_b_bits_address
  // (bank bit 7 OR-ed back in).
  // -------------------------------------------------------------------------
  TLMonitor_48 monitor_2 ( // @[Nodes.scala:27:25]
    .clock (clock),
    .reset (reset),
    .io_in_a_ready (_inclusive_cache_bank_sched_2_io_in_a_ready), // @[InclusiveCache.scala:137:29]
    .io_in_a_valid (auto_in_2_a_valid),
    .io_in_a_bits_opcode (auto_in_2_a_bits_opcode),
    .io_in_a_bits_param (auto_in_2_a_bits_param),
    .io_in_a_bits_size (auto_in_2_a_bits_size),
    .io_in_a_bits_source (auto_in_2_a_bits_source),
    .io_in_a_bits_address (auto_in_2_a_bits_address),
    .io_in_a_bits_mask (auto_in_2_a_bits_mask),
    .io_in_a_bits_corrupt (auto_in_2_a_bits_corrupt),
    .io_in_b_ready (auto_in_2_b_ready),
    .io_in_b_valid (_inclusive_cache_bank_sched_2_io_in_b_valid), // @[InclusiveCache.scala:137:29]
    .io_in_b_bits_param (_inclusive_cache_bank_sched_2_io_in_b_bits_param), // @[InclusiveCache.scala:137:29]
    .io_in_b_bits_source (_inclusive_cache_bank_sched_2_io_in_b_bits_source), // @[InclusiveCache.scala:137:29]
    .io_in_b_bits_address (nodeIn_2_b_bits_address), // @[Parameters.scala:248:14]
    .io_in_c_ready (_inclusive_cache_bank_sched_2_io_in_c_ready), // @[InclusiveCache.scala:137:29]
    .io_in_c_valid (auto_in_2_c_valid),
    .io_in_c_bits_opcode (auto_in_2_c_bits_opcode),
    .io_in_c_bits_param (auto_in_2_c_bits_param),
    .io_in_c_bits_size (auto_in_2_c_bits_size),
    .io_in_c_bits_source (auto_in_2_c_bits_source),
    .io_in_c_bits_address (auto_in_2_c_bits_address),
    .io_in_c_bits_corrupt (auto_in_2_c_bits_corrupt),
    .io_in_d_ready (auto_in_2_d_ready),
    .io_in_d_valid (_inclusive_cache_bank_sched_2_io_in_d_valid), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_opcode (_inclusive_cache_bank_sched_2_io_in_d_bits_opcode), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_param (_inclusive_cache_bank_sched_2_io_in_d_bits_param), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_size (_inclusive_cache_bank_sched_2_io_in_d_bits_size), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_source (_inclusive_cache_bank_sched_2_io_in_d_bits_source), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_sink (_inclusive_cache_bank_sched_2_io_in_d_bits_sink), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_denied (_inclusive_cache_bank_sched_2_io_in_d_bits_denied), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_corrupt (_inclusive_cache_bank_sched_2_io_in_d_bits_corrupt), // @[InclusiveCache.scala:137:29]
    .io_in_e_valid (auto_in_2_e_valid),
    .io_in_e_bits_sink (auto_in_2_e_bits_sink)
  ); // @[Nodes.scala:27:25]
  // -------------------------------------------------------------------------
  // TileLink protocol monitor for bank 3's input port (observe-only; no
  // outputs connected).  B-channel address observed is nodeIn_3_b_bits_address
  // (bank bits 7:6 OR-ed back in).
  // -------------------------------------------------------------------------
  TLMonitor_49 monitor_3 ( // @[Nodes.scala:27:25]
    .clock (clock),
    .reset (reset),
    .io_in_a_ready (_inclusive_cache_bank_sched_3_io_in_a_ready), // @[InclusiveCache.scala:137:29]
    .io_in_a_valid (auto_in_3_a_valid),
    .io_in_a_bits_opcode (auto_in_3_a_bits_opcode),
    .io_in_a_bits_param (auto_in_3_a_bits_param),
    .io_in_a_bits_size (auto_in_3_a_bits_size),
    .io_in_a_bits_source (auto_in_3_a_bits_source),
    .io_in_a_bits_address (auto_in_3_a_bits_address),
    .io_in_a_bits_mask (auto_in_3_a_bits_mask),
    .io_in_a_bits_corrupt (auto_in_3_a_bits_corrupt),
    .io_in_b_ready (auto_in_3_b_ready),
    .io_in_b_valid (_inclusive_cache_bank_sched_3_io_in_b_valid), // @[InclusiveCache.scala:137:29]
    .io_in_b_bits_param (_inclusive_cache_bank_sched_3_io_in_b_bits_param), // @[InclusiveCache.scala:137:29]
    .io_in_b_bits_source (_inclusive_cache_bank_sched_3_io_in_b_bits_source), // @[InclusiveCache.scala:137:29]
    .io_in_b_bits_address (nodeIn_3_b_bits_address), // @[Parameters.scala:248:14]
    .io_in_c_ready (_inclusive_cache_bank_sched_3_io_in_c_ready), // @[InclusiveCache.scala:137:29]
    .io_in_c_valid (auto_in_3_c_valid),
    .io_in_c_bits_opcode (auto_in_3_c_bits_opcode),
    .io_in_c_bits_param (auto_in_3_c_bits_param),
    .io_in_c_bits_size (auto_in_3_c_bits_size),
    .io_in_c_bits_source (auto_in_3_c_bits_source),
    .io_in_c_bits_address (auto_in_3_c_bits_address),
    .io_in_c_bits_corrupt (auto_in_3_c_bits_corrupt),
    .io_in_d_ready (auto_in_3_d_ready),
    .io_in_d_valid (_inclusive_cache_bank_sched_3_io_in_d_valid), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_opcode (_inclusive_cache_bank_sched_3_io_in_d_bits_opcode), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_param (_inclusive_cache_bank_sched_3_io_in_d_bits_param), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_size (_inclusive_cache_bank_sched_3_io_in_d_bits_size), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_source (_inclusive_cache_bank_sched_3_io_in_d_bits_source), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_sink (_inclusive_cache_bank_sched_3_io_in_d_bits_sink), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_denied (_inclusive_cache_bank_sched_3_io_in_d_bits_denied), // @[InclusiveCache.scala:137:29]
    .io_in_d_bits_corrupt (_inclusive_cache_bank_sched_3_io_in_d_bits_corrupt), // @[InclusiveCache.scala:137:29]
    .io_in_e_valid (auto_in_3_e_valid),
    .io_in_e_bits_sink (auto_in_3_e_bits_sink)
  ); // @[Nodes.scala:27:25]
  // -------------------------------------------------------------------------
  // Bank 0 scheduler: sink side wired to auto_in_0_*, master side wired
  // directly to auto_out_0_* (bank 0's select bits are zero, so its output
  // addresses need no adjustment).  It receives the ctrls flush request only
  // when `contained` selects bank 0; the request address is the low 32 bits
  // of the 64-bit flush register value.
  // -------------------------------------------------------------------------
  InclusiveCacheBankScheduler inclusive_cache_bank_sched ( // @[InclusiveCache.scala:137:29]
    .clock (clock),
    .reset (reset),
    .io_in_a_ready (_inclusive_cache_bank_sched_io_in_a_ready),
    .io_in_a_valid (auto_in_0_a_valid),
    .io_in_a_bits_opcode (auto_in_0_a_bits_opcode),
    .io_in_a_bits_param (auto_in_0_a_bits_param),
    .io_in_a_bits_size (auto_in_0_a_bits_size),
    .io_in_a_bits_source (auto_in_0_a_bits_source),
    .io_in_a_bits_address (auto_in_0_a_bits_address),
    .io_in_a_bits_mask (auto_in_0_a_bits_mask),
    .io_in_a_bits_data (auto_in_0_a_bits_data),
    .io_in_a_bits_corrupt (auto_in_0_a_bits_corrupt),
    .io_in_b_ready (auto_in_0_b_ready),
    .io_in_b_valid (_inclusive_cache_bank_sched_io_in_b_valid),
    .io_in_b_bits_param (_inclusive_cache_bank_sched_io_in_b_bits_param),
    .io_in_b_bits_source (_inclusive_cache_bank_sched_io_in_b_bits_source),
    .io_in_b_bits_address (_inclusive_cache_bank_sched_io_in_b_bits_address),
    .io_in_c_ready (_inclusive_cache_bank_sched_io_in_c_ready),
    .io_in_c_valid (auto_in_0_c_valid),
    .io_in_c_bits_opcode (auto_in_0_c_bits_opcode),
    .io_in_c_bits_param (auto_in_0_c_bits_param),
    .io_in_c_bits_size (auto_in_0_c_bits_size),
    .io_in_c_bits_source (auto_in_0_c_bits_source),
    .io_in_c_bits_address (auto_in_0_c_bits_address),
    .io_in_c_bits_data (auto_in_0_c_bits_data),
    .io_in_c_bits_corrupt (auto_in_0_c_bits_corrupt),
    .io_in_d_ready (auto_in_0_d_ready),
    .io_in_d_valid (_inclusive_cache_bank_sched_io_in_d_valid),
    .io_in_d_bits_opcode (_inclusive_cache_bank_sched_io_in_d_bits_opcode),
    .io_in_d_bits_param (_inclusive_cache_bank_sched_io_in_d_bits_param),
    .io_in_d_bits_size (_inclusive_cache_bank_sched_io_in_d_bits_size),
    .io_in_d_bits_source (_inclusive_cache_bank_sched_io_in_d_bits_source),
    .io_in_d_bits_sink (_inclusive_cache_bank_sched_io_in_d_bits_sink),
    .io_in_d_bits_denied (_inclusive_cache_bank_sched_io_in_d_bits_denied),
    .io_in_d_bits_data (auto_in_0_d_bits_data),
    .io_in_d_bits_corrupt (_inclusive_cache_bank_sched_io_in_d_bits_corrupt),
    .io_in_e_valid (auto_in_0_e_valid),
    .io_in_e_bits_sink (auto_in_0_e_bits_sink),
    .io_out_a_ready (auto_out_0_a_ready),
    .io_out_a_valid (auto_out_0_a_valid),
    .io_out_a_bits_opcode (auto_out_0_a_bits_opcode),
    .io_out_a_bits_param (auto_out_0_a_bits_param),
    .io_out_a_bits_size (auto_out_0_a_bits_size),
    .io_out_a_bits_source (auto_out_0_a_bits_source),
    .io_out_a_bits_address (auto_out_0_a_bits_address),
    .io_out_a_bits_mask (auto_out_0_a_bits_mask),
    .io_out_a_bits_data (auto_out_0_a_bits_data),
    .io_out_a_bits_corrupt (auto_out_0_a_bits_corrupt),
    .io_out_c_ready (auto_out_0_c_ready),
    .io_out_c_valid (auto_out_0_c_valid),
    .io_out_c_bits_opcode (auto_out_0_c_bits_opcode),
    .io_out_c_bits_param (auto_out_0_c_bits_param),
    .io_out_c_bits_size (auto_out_0_c_bits_size),
    .io_out_c_bits_source (auto_out_0_c_bits_source),
    .io_out_c_bits_address (auto_out_0_c_bits_address),
    .io_out_c_bits_data (auto_out_0_c_bits_data),
    .io_out_c_bits_corrupt (auto_out_0_c_bits_corrupt),
    .io_out_d_ready (auto_out_0_d_ready),
    .io_out_d_valid (auto_out_0_d_valid),
    .io_out_d_bits_opcode (auto_out_0_d_bits_opcode),
    .io_out_d_bits_param (auto_out_0_d_bits_param),
    .io_out_d_bits_size (auto_out_0_d_bits_size),
    .io_out_d_bits_source (auto_out_0_d_bits_source),
    .io_out_d_bits_sink (auto_out_0_d_bits_sink),
    .io_out_d_bits_denied (auto_out_0_d_bits_denied),
    .io_out_d_bits_data (auto_out_0_d_bits_data),
    .io_out_d_bits_corrupt (auto_out_0_d_bits_corrupt),
    .io_out_e_valid (auto_out_0_e_valid),
    .io_out_e_bits_sink (auto_out_0_e_bits_sink),
    .io_req_ready (_inclusive_cache_bank_sched_io_req_ready),
    .io_req_valid (contained & _ctrls_io_flush_req_valid), // @[InclusiveCache.scala:103:43, :169:67, :172:41]
    .io_req_bits_address (_ctrls_io_flush_req_bits[31:0]), // @[Parameters.scala:137:31]
    .io_resp_valid (_inclusive_cache_bank_sched_io_resp_valid)
  ); // @[InclusiveCache.scala:137:29]
  // -------------------------------------------------------------------------
  // Bank 1 scheduler: sink side wired to auto_in_1_*, master side mostly
  // wired to auto_out_1_*.  Its output A/C addresses are captured in the
  // _inclusive_cache_bank_sched_1_io_out_*_bits_address nets instead of the
  // auto_out_1 ports; the consumer of those nets (presumably re-inserting the
  // bank-select bits, as done for the B channel above) is outside this chunk.
  // Flush requests reach this bank only when `contained_1` is set.
  // -------------------------------------------------------------------------
  InclusiveCacheBankScheduler inclusive_cache_bank_sched_1 ( // @[InclusiveCache.scala:137:29]
    .clock (clock),
    .reset (reset),
    .io_in_a_ready (_inclusive_cache_bank_sched_1_io_in_a_ready),
    .io_in_a_valid (auto_in_1_a_valid),
    .io_in_a_bits_opcode (auto_in_1_a_bits_opcode),
    .io_in_a_bits_param (auto_in_1_a_bits_param),
    .io_in_a_bits_size (auto_in_1_a_bits_size),
    .io_in_a_bits_source (auto_in_1_a_bits_source),
    .io_in_a_bits_address (auto_in_1_a_bits_address),
    .io_in_a_bits_mask (auto_in_1_a_bits_mask),
    .io_in_a_bits_data (auto_in_1_a_bits_data),
    .io_in_a_bits_corrupt (auto_in_1_a_bits_corrupt),
    .io_in_b_ready (auto_in_1_b_ready),
    .io_in_b_valid (_inclusive_cache_bank_sched_1_io_in_b_valid),
    .io_in_b_bits_param (_inclusive_cache_bank_sched_1_io_in_b_bits_param),
    .io_in_b_bits_source (_inclusive_cache_bank_sched_1_io_in_b_bits_source),
    .io_in_b_bits_address (_inclusive_cache_bank_sched_1_io_in_b_bits_address),
    .io_in_c_ready (_inclusive_cache_bank_sched_1_io_in_c_ready),
    .io_in_c_valid (auto_in_1_c_valid),
    .io_in_c_bits_opcode (auto_in_1_c_bits_opcode),
    .io_in_c_bits_param (auto_in_1_c_bits_param),
    .io_in_c_bits_size (auto_in_1_c_bits_size),
    .io_in_c_bits_source (auto_in_1_c_bits_source),
    .io_in_c_bits_address (auto_in_1_c_bits_address),
    .io_in_c_bits_data (auto_in_1_c_bits_data),
    .io_in_c_bits_corrupt (auto_in_1_c_bits_corrupt),
    .io_in_d_ready (auto_in_1_d_ready),
    .io_in_d_valid (_inclusive_cache_bank_sched_1_io_in_d_valid),
    .io_in_d_bits_opcode (_inclusive_cache_bank_sched_1_io_in_d_bits_opcode),
    .io_in_d_bits_param (_inclusive_cache_bank_sched_1_io_in_d_bits_param),
    .io_in_d_bits_size (_inclusive_cache_bank_sched_1_io_in_d_bits_size),
    .io_in_d_bits_source (_inclusive_cache_bank_sched_1_io_in_d_bits_source),
    .io_in_d_bits_sink (_inclusive_cache_bank_sched_1_io_in_d_bits_sink),
    .io_in_d_bits_denied (_inclusive_cache_bank_sched_1_io_in_d_bits_denied),
    .io_in_d_bits_data (auto_in_1_d_bits_data),
    .io_in_d_bits_corrupt (_inclusive_cache_bank_sched_1_io_in_d_bits_corrupt),
    .io_in_e_valid (auto_in_1_e_valid),
    .io_in_e_bits_sink (auto_in_1_e_bits_sink),
    .io_out_a_ready (auto_out_1_a_ready),
    .io_out_a_valid (auto_out_1_a_valid),
    .io_out_a_bits_opcode (auto_out_1_a_bits_opcode),
    .io_out_a_bits_param (auto_out_1_a_bits_param),
    .io_out_a_bits_size (auto_out_1_a_bits_size),
    .io_out_a_bits_source (auto_out_1_a_bits_source),
    .io_out_a_bits_address (_inclusive_cache_bank_sched_1_io_out_a_bits_address),
    .io_out_a_bits_mask (auto_out_1_a_bits_mask),
    .io_out_a_bits_data (auto_out_1_a_bits_data),
    .io_out_a_bits_corrupt (auto_out_1_a_bits_corrupt),
    .io_out_c_ready (auto_out_1_c_ready),
    .io_out_c_valid (auto_out_1_c_valid),
    .io_out_c_bits_opcode (auto_out_1_c_bits_opcode),
    .io_out_c_bits_param (auto_out_1_c_bits_param),
    .io_out_c_bits_size (auto_out_1_c_bits_size),
    .io_out_c_bits_source (auto_out_1_c_bits_source),
    .io_out_c_bits_address (_inclusive_cache_bank_sched_1_io_out_c_bits_address),
    .io_out_c_bits_data (auto_out_1_c_bits_data),
    .io_out_c_bits_corrupt (auto_out_1_c_bits_corrupt),
    .io_out_d_ready (auto_out_1_d_ready),
    .io_out_d_valid (auto_out_1_d_valid),
    .io_out_d_bits_opcode (auto_out_1_d_bits_opcode),
    .io_out_d_bits_param (auto_out_1_d_bits_param),
    .io_out_d_bits_size (auto_out_1_d_bits_size),
    .io_out_d_bits_source (auto_out_1_d_bits_source),
    .io_out_d_bits_sink (auto_out_1_d_bits_sink),
    .io_out_d_bits_denied (auto_out_1_d_bits_denied),
    .io_out_d_bits_data (auto_out_1_d_bits_data),
    .io_out_d_bits_corrupt (auto_out_1_d_bits_corrupt),
    .io_out_e_valid (auto_out_1_e_valid),
    .io_out_e_bits_sink (auto_out_1_e_bits_sink),
    .io_req_ready (_inclusive_cache_bank_sched_1_io_req_ready),
    .io_req_valid (contained_1 & _ctrls_io_flush_req_valid), // @[InclusiveCache.scala:103:43, :169:67, :172:41]
    .io_req_bits_address (_ctrls_io_flush_req_bits[31:0]), // @[Parameters.scala:137:31]
    .io_resp_valid (_inclusive_cache_bank_sched_1_io_resp_valid)
  ); // @[InclusiveCache.scala:137:29]
  // Bank-2 scheduler instance. One InclusiveCacheBankScheduler is elaborated per
  // cache bank; this one services the inner TileLink port auto_in_2_* and drives
  // the outer (memory-side) port auto_out_2_*. Channels the scheduler *produces*
  // toward the inner side (B grant-probes, D responses) come out on
  // _inclusive_cache_bank_sched_2_* wires and are forwarded by the assign block
  // later in the module; pass-through fields connect directly to auto_* nets.
  InclusiveCacheBankScheduler inclusive_cache_bank_sched_2 ( // @[InclusiveCache.scala:137:29]
    .clock (clock),
    .reset (reset),
    // Inner A channel (acquires/puts from the core side).
    .io_in_a_ready (_inclusive_cache_bank_sched_2_io_in_a_ready),
    .io_in_a_valid (auto_in_2_a_valid),
    .io_in_a_bits_opcode (auto_in_2_a_bits_opcode),
    .io_in_a_bits_param (auto_in_2_a_bits_param),
    .io_in_a_bits_size (auto_in_2_a_bits_size),
    .io_in_a_bits_source (auto_in_2_a_bits_source),
    .io_in_a_bits_address (auto_in_2_a_bits_address),
    .io_in_a_bits_mask (auto_in_2_a_bits_mask),
    .io_in_a_bits_data (auto_in_2_a_bits_data),
    .io_in_a_bits_corrupt (auto_in_2_a_bits_corrupt),
    // Inner B channel (probes issued by the cache toward the core side).
    .io_in_b_ready (auto_in_2_b_ready),
    .io_in_b_valid (_inclusive_cache_bank_sched_2_io_in_b_valid),
    .io_in_b_bits_param (_inclusive_cache_bank_sched_2_io_in_b_bits_param),
    .io_in_b_bits_source (_inclusive_cache_bank_sched_2_io_in_b_bits_source),
    .io_in_b_bits_address (_inclusive_cache_bank_sched_2_io_in_b_bits_address),
    // Inner C channel (releases/probe-acks from the core side).
    .io_in_c_ready (_inclusive_cache_bank_sched_2_io_in_c_ready),
    .io_in_c_valid (auto_in_2_c_valid),
    .io_in_c_bits_opcode (auto_in_2_c_bits_opcode),
    .io_in_c_bits_param (auto_in_2_c_bits_param),
    .io_in_c_bits_size (auto_in_2_c_bits_size),
    .io_in_c_bits_source (auto_in_2_c_bits_source),
    .io_in_c_bits_address (auto_in_2_c_bits_address),
    .io_in_c_bits_data (auto_in_2_c_bits_data),
    .io_in_c_bits_corrupt (auto_in_2_c_bits_corrupt),
    // Inner D channel (responses back to the core side). Note the data field is
    // tied straight to auto_in_2_d_bits_data rather than an intermediate wire.
    .io_in_d_ready (auto_in_2_d_ready),
    .io_in_d_valid (_inclusive_cache_bank_sched_2_io_in_d_valid),
    .io_in_d_bits_opcode (_inclusive_cache_bank_sched_2_io_in_d_bits_opcode),
    .io_in_d_bits_param (_inclusive_cache_bank_sched_2_io_in_d_bits_param),
    .io_in_d_bits_size (_inclusive_cache_bank_sched_2_io_in_d_bits_size),
    .io_in_d_bits_source (_inclusive_cache_bank_sched_2_io_in_d_bits_source),
    .io_in_d_bits_sink (_inclusive_cache_bank_sched_2_io_in_d_bits_sink),
    .io_in_d_bits_denied (_inclusive_cache_bank_sched_2_io_in_d_bits_denied),
    .io_in_d_bits_data (auto_in_2_d_bits_data),
    .io_in_d_bits_corrupt (_inclusive_cache_bank_sched_2_io_in_d_bits_corrupt),
    // Inner E channel (grant acknowledgements).
    .io_in_e_valid (auto_in_2_e_valid),
    .io_in_e_bits_sink (auto_in_2_e_bits_sink),
    // Outer A channel toward memory. The address goes through an intermediate
    // wire so the bank-select bits can be re-inserted (see assigns near endmodule).
    .io_out_a_ready (auto_out_2_a_ready),
    .io_out_a_valid (auto_out_2_a_valid),
    .io_out_a_bits_opcode (auto_out_2_a_bits_opcode),
    .io_out_a_bits_param (auto_out_2_a_bits_param),
    .io_out_a_bits_size (auto_out_2_a_bits_size),
    .io_out_a_bits_source (auto_out_2_a_bits_source),
    .io_out_a_bits_address (_inclusive_cache_bank_sched_2_io_out_a_bits_address),
    .io_out_a_bits_mask (auto_out_2_a_bits_mask),
    .io_out_a_bits_data (auto_out_2_a_bits_data),
    .io_out_a_bits_corrupt (auto_out_2_a_bits_corrupt),
    // Outer C channel (writebacks); address likewise routed via a wire.
    .io_out_c_ready (auto_out_2_c_ready),
    .io_out_c_valid (auto_out_2_c_valid),
    .io_out_c_bits_opcode (auto_out_2_c_bits_opcode),
    .io_out_c_bits_param (auto_out_2_c_bits_param),
    .io_out_c_bits_size (auto_out_2_c_bits_size),
    .io_out_c_bits_source (auto_out_2_c_bits_source),
    .io_out_c_bits_address (_inclusive_cache_bank_sched_2_io_out_c_bits_address),
    .io_out_c_bits_data (auto_out_2_c_bits_data),
    .io_out_c_bits_corrupt (auto_out_2_c_bits_corrupt),
    // Outer D channel (memory responses), passed straight through.
    .io_out_d_ready (auto_out_2_d_ready),
    .io_out_d_valid (auto_out_2_d_valid),
    .io_out_d_bits_opcode (auto_out_2_d_bits_opcode),
    .io_out_d_bits_param (auto_out_2_d_bits_param),
    .io_out_d_bits_size (auto_out_2_d_bits_size),
    .io_out_d_bits_source (auto_out_2_d_bits_source),
    .io_out_d_bits_sink (auto_out_2_d_bits_sink),
    .io_out_d_bits_denied (auto_out_2_d_bits_denied),
    .io_out_d_bits_data (auto_out_2_d_bits_data),
    .io_out_d_bits_corrupt (auto_out_2_d_bits_corrupt),
    // Outer E channel.
    .io_out_e_valid (auto_out_2_e_valid),
    .io_out_e_bits_sink (auto_out_2_e_bits_sink),
    // Flush request port from the control unit: this bank accepts the request
    // only when contained_2 indicates the flush address falls in this bank.
    .io_req_ready (_inclusive_cache_bank_sched_2_io_req_ready),
    .io_req_valid (contained_2 & _ctrls_io_flush_req_valid), // @[InclusiveCache.scala:103:43, :169:67, :172:41]
    .io_req_bits_address (_ctrls_io_flush_req_bits[31:0]), // @[Parameters.scala:137:31]
    .io_resp_valid (_inclusive_cache_bank_sched_2_io_resp_valid)
  ); // @[InclusiveCache.scala:137:29]
  // Bank-3 scheduler instance: identical wiring pattern to the other banks but
  // bound to the auto_in_3_* / auto_out_3_* TileLink ports. Scheduler-driven
  // inner-side fields (B channel, most of D) exit on
  // _inclusive_cache_bank_sched_3_* wires and are forwarded by the assign block
  // later in the module.
  InclusiveCacheBankScheduler inclusive_cache_bank_sched_3 ( // @[InclusiveCache.scala:137:29]
    .clock (clock),
    .reset (reset),
    // Inner A channel (acquires/puts from the core side).
    .io_in_a_ready (_inclusive_cache_bank_sched_3_io_in_a_ready),
    .io_in_a_valid (auto_in_3_a_valid),
    .io_in_a_bits_opcode (auto_in_3_a_bits_opcode),
    .io_in_a_bits_param (auto_in_3_a_bits_param),
    .io_in_a_bits_size (auto_in_3_a_bits_size),
    .io_in_a_bits_source (auto_in_3_a_bits_source),
    .io_in_a_bits_address (auto_in_3_a_bits_address),
    .io_in_a_bits_mask (auto_in_3_a_bits_mask),
    .io_in_a_bits_data (auto_in_3_a_bits_data),
    .io_in_a_bits_corrupt (auto_in_3_a_bits_corrupt),
    // Inner B channel (probes toward the core side).
    .io_in_b_ready (auto_in_3_b_ready),
    .io_in_b_valid (_inclusive_cache_bank_sched_3_io_in_b_valid),
    .io_in_b_bits_param (_inclusive_cache_bank_sched_3_io_in_b_bits_param),
    .io_in_b_bits_source (_inclusive_cache_bank_sched_3_io_in_b_bits_source),
    .io_in_b_bits_address (_inclusive_cache_bank_sched_3_io_in_b_bits_address),
    // Inner C channel (releases/probe-acks).
    .io_in_c_ready (_inclusive_cache_bank_sched_3_io_in_c_ready),
    .io_in_c_valid (auto_in_3_c_valid),
    .io_in_c_bits_opcode (auto_in_3_c_bits_opcode),
    .io_in_c_bits_param (auto_in_3_c_bits_param),
    .io_in_c_bits_size (auto_in_3_c_bits_size),
    .io_in_c_bits_source (auto_in_3_c_bits_source),
    .io_in_c_bits_address (auto_in_3_c_bits_address),
    .io_in_c_bits_data (auto_in_3_c_bits_data),
    .io_in_c_bits_corrupt (auto_in_3_c_bits_corrupt),
    // Inner D channel (responses to the core side); data ties directly to the port.
    .io_in_d_ready (auto_in_3_d_ready),
    .io_in_d_valid (_inclusive_cache_bank_sched_3_io_in_d_valid),
    .io_in_d_bits_opcode (_inclusive_cache_bank_sched_3_io_in_d_bits_opcode),
    .io_in_d_bits_param (_inclusive_cache_bank_sched_3_io_in_d_bits_param),
    .io_in_d_bits_size (_inclusive_cache_bank_sched_3_io_in_d_bits_size),
    .io_in_d_bits_source (_inclusive_cache_bank_sched_3_io_in_d_bits_source),
    .io_in_d_bits_sink (_inclusive_cache_bank_sched_3_io_in_d_bits_sink),
    .io_in_d_bits_denied (_inclusive_cache_bank_sched_3_io_in_d_bits_denied),
    .io_in_d_bits_data (auto_in_3_d_bits_data),
    .io_in_d_bits_corrupt (_inclusive_cache_bank_sched_3_io_in_d_bits_corrupt),
    // Inner E channel (grant acknowledgements).
    .io_in_e_valid (auto_in_3_e_valid),
    .io_in_e_bits_sink (auto_in_3_e_bits_sink),
    // Outer A channel; address routed through a wire so the bank-select bits can
    // be re-inserted by the assigns near endmodule.
    .io_out_a_ready (auto_out_3_a_ready),
    .io_out_a_valid (auto_out_3_a_valid),
    .io_out_a_bits_opcode (auto_out_3_a_bits_opcode),
    .io_out_a_bits_param (auto_out_3_a_bits_param),
    .io_out_a_bits_size (auto_out_3_a_bits_size),
    .io_out_a_bits_source (auto_out_3_a_bits_source),
    .io_out_a_bits_address (_inclusive_cache_bank_sched_3_io_out_a_bits_address),
    .io_out_a_bits_mask (auto_out_3_a_bits_mask),
    .io_out_a_bits_data (auto_out_3_a_bits_data),
    .io_out_a_bits_corrupt (auto_out_3_a_bits_corrupt),
    // Outer C channel (writebacks); address likewise via a wire.
    .io_out_c_ready (auto_out_3_c_ready),
    .io_out_c_valid (auto_out_3_c_valid),
    .io_out_c_bits_opcode (auto_out_3_c_bits_opcode),
    .io_out_c_bits_param (auto_out_3_c_bits_param),
    .io_out_c_bits_size (auto_out_3_c_bits_size),
    .io_out_c_bits_source (auto_out_3_c_bits_source),
    .io_out_c_bits_address (_inclusive_cache_bank_sched_3_io_out_c_bits_address),
    .io_out_c_bits_data (auto_out_3_c_bits_data),
    .io_out_c_bits_corrupt (auto_out_3_c_bits_corrupt),
    // Outer D channel (memory responses), passed straight through.
    .io_out_d_ready (auto_out_3_d_ready),
    .io_out_d_valid (auto_out_3_d_valid),
    .io_out_d_bits_opcode (auto_out_3_d_bits_opcode),
    .io_out_d_bits_param (auto_out_3_d_bits_param),
    .io_out_d_bits_size (auto_out_3_d_bits_size),
    .io_out_d_bits_source (auto_out_3_d_bits_source),
    .io_out_d_bits_sink (auto_out_3_d_bits_sink),
    .io_out_d_bits_denied (auto_out_3_d_bits_denied),
    .io_out_d_bits_data (auto_out_3_d_bits_data),
    .io_out_d_bits_corrupt (auto_out_3_d_bits_corrupt),
    // Outer E channel.
    .io_out_e_valid (auto_out_3_e_valid),
    .io_out_e_bits_sink (auto_out_3_e_bits_sink),
    // Flush request port, gated by contained_3 (flush address falls in this bank).
    .io_req_ready (_inclusive_cache_bank_sched_3_io_req_ready),
    .io_req_valid (contained_3 & _ctrls_io_flush_req_valid), // @[InclusiveCache.scala:103:43, :169:67, :172:41]
    .io_req_bits_address (_ctrls_io_flush_req_bits[31:0]), // @[Parameters.scala:137:31]
    .io_resp_valid (_inclusive_cache_bank_sched_3_io_resp_valid)
  ); // @[InclusiveCache.scala:137:29]
  // Forward each bank scheduler's inner-side outputs to the corresponding
  // auto_in_<bank>_* module ports: A-channel ready, the full B channel, C-channel
  // ready, and all D-channel fields except data (which is tied directly at the
  // instantiation). Banks 1-3 route the B-channel address through a nodeIn_*
  // wire (declared earlier in the module); bank 0 connects it directly.
  // --- Bank 3 ---
  assign auto_in_3_a_ready = _inclusive_cache_bank_sched_3_io_in_a_ready; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_3_b_valid = _inclusive_cache_bank_sched_3_io_in_b_valid; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_3_b_bits_param = _inclusive_cache_bank_sched_3_io_in_b_bits_param; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_3_b_bits_source = _inclusive_cache_bank_sched_3_io_in_b_bits_source; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_3_b_bits_address = nodeIn_3_b_bits_address; // @[Parameters.scala:248:14]
  assign auto_in_3_c_ready = _inclusive_cache_bank_sched_3_io_in_c_ready; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_3_d_valid = _inclusive_cache_bank_sched_3_io_in_d_valid; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_3_d_bits_opcode = _inclusive_cache_bank_sched_3_io_in_d_bits_opcode; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_3_d_bits_param = _inclusive_cache_bank_sched_3_io_in_d_bits_param; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_3_d_bits_size = _inclusive_cache_bank_sched_3_io_in_d_bits_size; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_3_d_bits_source = _inclusive_cache_bank_sched_3_io_in_d_bits_source; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_3_d_bits_sink = _inclusive_cache_bank_sched_3_io_in_d_bits_sink; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_3_d_bits_denied = _inclusive_cache_bank_sched_3_io_in_d_bits_denied; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_3_d_bits_corrupt = _inclusive_cache_bank_sched_3_io_in_d_bits_corrupt; // @[InclusiveCache.scala:108:9, :137:29]
  // --- Bank 2 ---
  assign auto_in_2_a_ready = _inclusive_cache_bank_sched_2_io_in_a_ready; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_2_b_valid = _inclusive_cache_bank_sched_2_io_in_b_valid; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_2_b_bits_param = _inclusive_cache_bank_sched_2_io_in_b_bits_param; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_2_b_bits_source = _inclusive_cache_bank_sched_2_io_in_b_bits_source; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_2_b_bits_address = nodeIn_2_b_bits_address; // @[Parameters.scala:248:14]
  assign auto_in_2_c_ready = _inclusive_cache_bank_sched_2_io_in_c_ready; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_2_d_valid = _inclusive_cache_bank_sched_2_io_in_d_valid; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_2_d_bits_opcode = _inclusive_cache_bank_sched_2_io_in_d_bits_opcode; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_2_d_bits_param = _inclusive_cache_bank_sched_2_io_in_d_bits_param; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_2_d_bits_size = _inclusive_cache_bank_sched_2_io_in_d_bits_size; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_2_d_bits_source = _inclusive_cache_bank_sched_2_io_in_d_bits_source; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_2_d_bits_sink = _inclusive_cache_bank_sched_2_io_in_d_bits_sink; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_2_d_bits_denied = _inclusive_cache_bank_sched_2_io_in_d_bits_denied; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_2_d_bits_corrupt = _inclusive_cache_bank_sched_2_io_in_d_bits_corrupt; // @[InclusiveCache.scala:108:9, :137:29]
  // --- Bank 1 ---
  assign auto_in_1_a_ready = _inclusive_cache_bank_sched_1_io_in_a_ready; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_1_b_valid = _inclusive_cache_bank_sched_1_io_in_b_valid; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_1_b_bits_param = _inclusive_cache_bank_sched_1_io_in_b_bits_param; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_1_b_bits_source = _inclusive_cache_bank_sched_1_io_in_b_bits_source; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_1_b_bits_address = nodeIn_1_b_bits_address; // @[Parameters.scala:248:14]
  assign auto_in_1_c_ready = _inclusive_cache_bank_sched_1_io_in_c_ready; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_1_d_valid = _inclusive_cache_bank_sched_1_io_in_d_valid; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_1_d_bits_opcode = _inclusive_cache_bank_sched_1_io_in_d_bits_opcode; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_1_d_bits_param = _inclusive_cache_bank_sched_1_io_in_d_bits_param; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_1_d_bits_size = _inclusive_cache_bank_sched_1_io_in_d_bits_size; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_1_d_bits_source = _inclusive_cache_bank_sched_1_io_in_d_bits_source; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_1_d_bits_sink = _inclusive_cache_bank_sched_1_io_in_d_bits_sink; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_1_d_bits_denied = _inclusive_cache_bank_sched_1_io_in_d_bits_denied; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_1_d_bits_corrupt = _inclusive_cache_bank_sched_1_io_in_d_bits_corrupt; // @[InclusiveCache.scala:108:9, :137:29]
  // --- Bank 0 (B-channel address connects directly; no nodeIn_* wire) ---
  assign auto_in_0_a_ready = _inclusive_cache_bank_sched_io_in_a_ready; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_0_b_valid = _inclusive_cache_bank_sched_io_in_b_valid; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_0_b_bits_param = _inclusive_cache_bank_sched_io_in_b_bits_param; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_0_b_bits_source = _inclusive_cache_bank_sched_io_in_b_bits_source; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_0_b_bits_address = _inclusive_cache_bank_sched_io_in_b_bits_address; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_0_c_ready = _inclusive_cache_bank_sched_io_in_c_ready; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_0_d_valid = _inclusive_cache_bank_sched_io_in_d_valid; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_0_d_bits_opcode = _inclusive_cache_bank_sched_io_in_d_bits_opcode; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_0_d_bits_param = _inclusive_cache_bank_sched_io_in_d_bits_param; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_0_d_bits_size = _inclusive_cache_bank_sched_io_in_d_bits_size; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_0_d_bits_source = _inclusive_cache_bank_sched_io_in_d_bits_source; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_0_d_bits_sink = _inclusive_cache_bank_sched_io_in_d_bits_sink; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_0_d_bits_denied = _inclusive_cache_bank_sched_io_in_d_bits_denied; // @[InclusiveCache.scala:108:9, :137:29]
  assign auto_in_0_d_bits_corrupt = _inclusive_cache_bank_sched_io_in_d_bits_corrupt; // @[InclusiveCache.scala:108:9, :137:29]
  // Re-insert the bank-select bits into the outgoing A- and C-channel addresses.
  // The OR sets address bits [7:6] to the bank index: 8'hC0 (2'b11) for bank 3,
  // 8'h80 (2'b10) for bank 2, 7'h40 (2'b01) for bank 1; bank 0 (2'b00) needs no
  // assign and is absent here. NOTE(review): this is only correct if each
  // scheduler emits addresses with these bits cleared (bank-local addressing) --
  // presumably guaranteed by the bank binder upstream; an OR cannot clear bits.
  assign auto_out_3_a_bits_address = {_inclusive_cache_bank_sched_3_io_out_a_bits_address[31:8], _inclusive_cache_bank_sched_3_io_out_a_bits_address[7:0] | 8'hC0}; // @[Parameters.scala:248:14]
  assign auto_out_3_c_bits_address = {_inclusive_cache_bank_sched_3_io_out_c_bits_address[31:8], _inclusive_cache_bank_sched_3_io_out_c_bits_address[7:0] | 8'hC0}; // @[Parameters.scala:248:14]
  assign auto_out_2_a_bits_address = {_inclusive_cache_bank_sched_2_io_out_a_bits_address[31:8], _inclusive_cache_bank_sched_2_io_out_a_bits_address[7:0] | 8'h80}; // @[Parameters.scala:248:14]
  assign auto_out_2_c_bits_address = {_inclusive_cache_bank_sched_2_io_out_c_bits_address[31:8], _inclusive_cache_bank_sched_2_io_out_c_bits_address[7:0] | 8'h80}; // @[Parameters.scala:248:14]
  assign auto_out_1_a_bits_address = {_inclusive_cache_bank_sched_1_io_out_a_bits_address[31:7], _inclusive_cache_bank_sched_1_io_out_a_bits_address[6:0] | 7'h40}; // @[Parameters.scala:248:14]
  assign auto_out_1_c_bits_address = {_inclusive_cache_bank_sched_1_io_out_c_bits_address[31:7], _inclusive_cache_bank_sched_1_io_out_c_bits_address[6:0] | 7'h40}; // @[Parameters.scala:248:14]
endmodule |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.